Columns:
  repo_name   string (lengths 6 to 130)
  hexsha      list
  file_path   list
  code        list
  apis        list
mixib/brightway2-calc
[ "0fa409b6e7bccbef2a220dd6a811356798518ebe" ]
[ "bw2calc/graph_traversal.py" ]
[ "from . import spsolve\nfrom heapq import heappush, heappop\nimport numpy as np\nimport warnings\n\n\nclass GraphTraversal:\n \"\"\"\nTraverse a supply chain, following paths of greatest impact.\n\nThis implementation uses a queue of datasets to assess. As the supply chain is traversed, datasets inputs are added to a list sorted by LCA score. Each activity in the sorted list is assessed, and added to the supply chain graph, as long as its impact is above a certain threshold, and the maximum number of calculations has not been exceeded.\n\nBecause the next dataset assessed is chosen by its impact, not its position in the graph, this is neither a breadth-first nor a depth-first search, but rather \"importance-first\".\n\nThis class is written in a functional style - no variables are stored in *self*, only methods.\n\nShould be used by calling the ``calculate`` method.\n\n.. warning:: Graph traversal with multioutput processes only works when other inputs are substituted (see `Multioutput processes in LCA <http://chris.mutel.org/multioutput.html>`__ for a description of multiputput process math in LCA).\n\n \"\"\"\n\n def calculate(self, lca, cutoff=0.005, max_calc=1e5, skip_coproducts=False):\n \"\"\"\nTraverse the supply chain graph.\n\nArgs:\n * *lca* (dict): An instance of ``bw2calc.lca.LCA``.\n * *cutoff* (float, default=0.005): Cutoff criteria to stop LCA calculations. Relative score of total, i.e. 0.005 will cutoff if a dataset has a score less than 0.5 percent of the total.\n * *max_calc* (int, default=10000): Maximum number of LCA calculations to perform.\n\nReturns:\n Dictionary of nodes, edges, and number of LCA calculations.\n\n \"\"\"\n if not hasattr(lca, \"supply_array\"):\n lca.lci()\n if not hasattr(lca, \"characterized_inventory\"):\n lca.lcia()\n\n supply = lca.supply_array.copy()\n score = lca.score\n\n if score == 0:\n raise ValueError(\"Zero total LCA score makes traversal impossible\")\n\n # Create matrix of LCIA CFs times biosphere flows, as these don't\n # change. This is also the unit score of each activity.\n characterized_biosphere = np.array(\n (lca.characterization_matrix * lca.biosphere_matrix).sum(axis=0)\n ).ravel()\n\n heap, nodes, edges = self.initialize_heap(lca, supply, characterized_biosphere)\n nodes, edges, counter = self.traverse(\n heap,\n nodes,\n edges,\n 0,\n max_calc,\n cutoff,\n score,\n supply,\n characterized_biosphere,\n lca,\n skip_coproducts,\n )\n\n return {\n \"nodes\": nodes,\n \"edges\": edges,\n \"counter\": counter,\n }\n\n def initialize_heap(self, lca, supply, characterized_biosphere):\n \"\"\"\nCreate a `priority queue <http://docs.python.org/2/library/heapq.html>`_ or ``heap`` to store inventory datasets, sorted by LCA score.\n\nPopulates the heap with each activity in ``demand``. Initial nodes are the *functional unit*, i.e. the complete demand, and each activity in the *functional unit*. 
Initial edges are inputs from each activity into the *functional unit*.\n\nThe *functional unit* is an abstract dataset (as it doesn't exist in the matrix), and is assigned the index ``-1``.\n\n \"\"\"\n heap, edges = [], []\n nodes = {-1: {\"amount\": 1, \"cum\": lca.score, \"ind\": 1e-6 * lca.score}}\n for index, amount in enumerate(lca.demand_array):\n if amount == 0:\n continue\n cum_score = self.cumulative_score(\n index, supply, characterized_biosphere, lca\n )\n heappush(heap, (abs(1 / cum_score), index))\n nodes[index] = {\n \"amount\": float(supply[index]),\n \"cum\": cum_score,\n \"ind\": self.unit_score(index, supply, characterized_biosphere),\n }\n edges.append(\n {\n \"to\": -1,\n \"from\": index,\n \"amount\": amount,\n \"exc_amount\": amount,\n \"impact\": cum_score * amount / float(supply[index]),\n }\n )\n return heap, nodes, edges\n\n def cumulative_score(self, index, supply, characterized_biosphere, lca):\n \"\"\"Compute cumulative LCA score for a given activity\"\"\"\n demand = np.zeros((supply.shape[0],))\n demand[index] = (supply[index] *\n # Normalize by the production amount\n lca.technosphere_matrix[index, index])\n return float((characterized_biosphere * spsolve(lca.technosphere_matrix, demand)).sum())\n\n def unit_score(self, index, supply, characterized_biosphere):\n \"\"\"Compute the LCA impact caused by the direct emissions and resource consumption of a given activity\"\"\"\n return float(characterized_biosphere[index] * supply[index])\n\n def traverse(\n self,\n heap,\n nodes,\n edges,\n counter,\n max_calc,\n cutoff,\n total_score,\n supply,\n characterized_biosphere,\n lca,\n skip_coproducts,\n ):\n \"\"\"\nBuild a directed graph by traversing the supply chain.\n\nNode ids are actually technosphere row/col indices, which makes lookup easier.\n\nReturns:\n (nodes, edges, number of calculations)\n\n \"\"\"\n # static_databases = {name for name in databases if databases[name].get(\"static\")}\n # reverse = lca.dicts.activity.reversed\n\n while heap:\n if counter >= max_calc:\n warnings.warn(\"Stopping traversal due to calculation count.\")\n break\n parent_index = heappop(heap)[1]\n # Skip links from static databases\n # if static_databases and reverse[parent_index][0] in static_databases:\n # continue\n\n # Assume that this activity produces its reference product\n scale_value = lca.technosphere_matrix[parent_index, parent_index]\n if scale_value == 0:\n raise ValueError(\n \"Can't rescale activities that produce zero reference product\"\n )\n col = lca.technosphere_matrix[:, parent_index].tocoo()\n # Multiply by -1 because technosphere values are negative\n # (consumption of inputs) and rescale\n children = [\n (int(col.row[i]), float(-1 * col.data[i] / scale_value))\n for i in range(col.row.shape[0])\n ]\n for activity, amount in children:\n # Skip values on technosphere diagonal\n if activity == parent_index:\n continue\n # Skip negative coproducts\n if skip_coproducts and amount <= 0:\n continue\n counter += 1\n cumulative_score = self.cumulative_score(\n activity, supply, characterized_biosphere, lca\n )\n if abs(cumulative_score) < abs(total_score * cutoff):\n continue\n\n # flow between activity and parent (Multiply by -1 because technosphere values are negative)\n flow = (\n -1.0\n * lca.technosphere_matrix[activity, parent_index]\n * supply[parent_index]\n )\n total_activity_output = (\n lca.technosphere_matrix[activity, activity] * supply[activity]\n )\n\n # Edge format is (to, from, mass amount, cumulative impact)\n edges.append(\n {\n \"to\": 
parent_index,\n \"from\": activity,\n # Amount of this link * amount of parent demanding link\n \"amount\": flow,\n # Raw exchange value\n \"exc_amount\": amount,\n # Impact related to this flow\n \"impact\": flow / total_activity_output * cumulative_score,\n }\n )\n # Want multiple incoming edges, but don't add existing node\n if activity in nodes:\n continue\n nodes[activity] = {\n # Total amount of this flow supplied\n \"amount\": total_activity_output,\n # Cumulative score from all flows of this activity\n \"cum\": cumulative_score,\n # Individual score attributable to environmental flows\n # coming directory from or to this activity\n \"ind\": self.unit_score(activity, supply, characterized_biosphere),\n }\n heappush(heap, (abs(1 / cumulative_score), activity))\n\n return nodes, edges, counter\n" ]
[ [ "numpy.zeros" ] ]
vishalbelsare/emmental-tutorials
[ "5920cb71de07bfdb717e46ddfbe76457e8868fa7" ]
[ "data_augmentation/eda/image/modules/soft_cross_entropy_loss.py" ]
[ "from typing import List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\n\nclass SoftCrossEntropyLoss(nn.Module):\n \"\"\"\n Calculate the CrossEntropyLoss with soft targets.\n\n :param weight: Weight to assign to each of the classes. Default: None\n :type weight: list of float\n :param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'.\n 'none': no reduction,\n 'mean': the mean of the losses,\n 'sum': the sum of the losses.\n :type reduction: str\n \"\"\"\n\n def __init__(self, weight: List[float] = None, reduction: str = \"mean\"):\n super().__init__()\n if weight is None:\n self.weight = None\n else:\n self.register_buffer(\"weight\", torch.Tensor(weight))\n\n self.reduction = reduction\n\n def forward(self, input: Tensor, target: Tensor) -> Tensor: # type:ignore\n \"\"\"\n Calculate the loss.\n\n :param input: prediction logits\n :param target: target probabilities\n :return: loss\n \"\"\"\n\n n, k = input.shape\n losses = input.new_zeros(n)\n\n for i in range(k):\n cls_idx = input.new_full((n,), i, dtype=torch.long)\n loss = F.cross_entropy(input, cls_idx, reduction=\"none\")\n if self.weight is not None:\n loss = loss * self.weight[i]\n losses += target[:, i].float() * loss\n\n if self.reduction == \"mean\":\n losses = losses.mean()\n elif self.reduction == \"sum\":\n losses = losses.sum()\n elif self.reduction != \"none\":\n raise ValueError(f\"Unrecognized reduction: {self.reduction}\")\n\n return losses\n" ]
[ [ "torch.nn.functional.cross_entropy", "torch.Tensor" ] ]
muthissar/homework
[ "9ee6361183da84f58e8b4842cc2c6047f7d743e1", "9ee6361183da84f58e8b4842cc2c6047f7d743e1" ]
[ "hw3/train_ac_f18.py", "hw3/dqn.py" ]
[ "\"\"\"\nOriginal code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017\nAdapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam\nAdapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport gym\nimport logz\nimport os\nimport time\nimport inspect\nfrom multiprocessing import Process\n\n#============================================================================================#\n# Utilities\n#============================================================================================#\n\ndef build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):\n \"\"\"\n Builds a feedforward neural network\n \n arguments:\n input_placeholder: placeholder variable for the state (batch_size, input_size)\n output_size: size of the output layer\n scope: variable scope of the network\n n_layers: number of hidden layers\n size: dimension of the hidden layer\n activation: activation of the hidden layers\n output_activation: activation of the ouput layers\n\n returns:\n output placeholder of the network (the result of a forward pass) \n\n Hint: use tf.layers.dense \n \"\"\"\n # YOUR CODE HERE\n with tf.variable_scope(scope):\n layer = input_placeholder\n for _ in range(n_layers):\n layer = tf.layers.dense(\n inputs=layer,\n units=size,\n activation=activation\n )\n output_placeholder = tf.layers.dense(\n inputs=layer,\n units=output_size,\n activation=output_activation\n )\n\n return output_placeholder\n\ndef pathlength(path):\n return len(path[\"reward\"])\n\ndef setup_logger(logdir, locals_):\n # Configure output directory for logging\n logz.configure_output_dir(logdir)\n # Log experimental parameters\n args = inspect.getargspec(train_AC)[0]\n params = {k: locals_[k] if k in locals_ else None for k in args}\n logz.save_params(params)\n\n#============================================================================================#\n# Actor Critic\n#============================================================================================#\n\nclass Agent(object):\n def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):\n super(Agent, self).__init__()\n self.ob_dim = computation_graph_args['ob_dim']\n self.ac_dim = computation_graph_args['ac_dim']\n self.discrete = computation_graph_args['discrete']\n self.size = computation_graph_args['size']\n self.n_layers = computation_graph_args['n_layers']\n self.learning_rate = computation_graph_args['learning_rate']\n self.num_target_updates = computation_graph_args['num_target_updates']\n self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']\n\n self.animate = sample_trajectory_args['animate']\n self.max_path_length = sample_trajectory_args['max_path_length']\n self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']\n\n self.gamma = estimate_advantage_args['gamma']\n self.normalize_advantages = estimate_advantage_args['normalize_advantages']\n\n def init_tf_sess(self):\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)\n tf_config.gpu_options.allow_growth = True # may need if using GPU\n self.sess = tf.Session(config=tf_config)\n self.sess.__enter__() # equivalent to `with self.sess:`\n tf.global_variables_initializer().run() #pylint: disable=E1101\n\n def define_placeholders(self):\n \"\"\"\n Placeholders for batch 
batch observations / actions / advantages in actor critic\n loss function.\n See Agent.build_computation_graph for notation\n\n returns:\n sy_ob_no: placeholder for observations\n sy_ac_na: placeholder for actions\n sy_adv_n: placeholder for advantages\n \"\"\"\n sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name=\"ob\", dtype=tf.float32)\n if self.discrete:\n sy_ac_na = tf.placeholder(shape=[None], name=\"ac\", dtype=tf.int32) \n else:\n sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name=\"ac\", dtype=tf.float32) \n # YOUR CODE HERE\n #TODO: for now single value\n sy_adv_n = tf.placeholder(shape=[None], name=\"adv\", dtype=tf.float32)\n return sy_ob_no, sy_ac_na, sy_adv_n\n\n def policy_forward_pass(self, sy_ob_no):\n \"\"\" Constructs the symbolic operation for the policy network outputs,\n which are the parameters of the policy distribution p(a|s)\n\n arguments:\n sy_ob_no: (batch_size, self.ob_dim)\n\n returns:\n the parameters of the policy.\n\n if discrete, the parameters are the logits of a categorical distribution\n over the actions\n sy_logits_na: (batch_size, self.ac_dim)\n\n if continuous, the parameters are a tuple (mean, log_std) of a Gaussian\n distribution over actions. log_std should just be a trainable\n variable, not a network output.\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n Hint: use the 'build_mlp' function to output the logits (in the discrete case)\n and the mean (in the continuous case).\n Pass in self.n_layers for the 'n_layers' argument, and\n pass in self.size for the 'size' argument.\n \"\"\"\n #raise NotImplementedError\n if self.discrete:\n # YOUR_HW2 CODE_HERE\n network = build_mlp(input_placeholder = sy_ob_no,\n output_size = self.ac_dim,\n scope=\"nn_policy_discrete\",\n n_layers = self.n_layers,\n size=self.size\n )\n sy_logits_na = network\n return sy_logits_na\n else:\n # YOUR_HW2 CODE_HERE\n network = build_mlp(input_placeholder = sy_ob_no,\n output_size = self.ac_dim,\n scope=\"nn_policy_continous_mean\",\n n_layers = self.n_layers,\n size=self.size\n )\n sy_mean = network\n sy_logstd = tf.get_variable(\n \"policy_continous_logstd\", \n shape=[self.ac_dim],\n trainable=True,\n dtype=tf.float32,\n initializer=tf.constant_initializer(np.log(1))\n #initializer=tf.constant_initializer(np.log(0.01))\n #initializer=tf.constant_initializer(np.log(np.sqrt(10)))\n )\n\n return (sy_mean, sy_logstd)\n\n def sample_action(self, policy_parameters):\n \"\"\" Constructs a symbolic operation for stochastically sampling from the policy\n distribution\n\n arguments:\n policy_parameters\n if discrete: logits of a categorical distribution over actions \n sy_logits_na: (batch_size, self.ac_dim)\n if continuous: (mean, log_std) of a Gaussian distribution over actions\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n returns:\n sy_sampled_ac: \n if discrete: (batch_size)\n if continuous: (batch_size, self.ac_dim)\n\n Hint: for the continuous case, use the reparameterization trick:\n The output from a Gaussian distribution with mean 'mu' and std 'sigma' is\n \n mu + sigma * z, z ~ N(0, I)\n \n This reduces the problem to just sampling z. 
(Hint: use tf.random_normal!)\n \"\"\"\n #raise NotImplementedError\n if self.discrete:\n sy_logits_na = policy_parameters\n # YOUR_HW2 CODE_HERE\n action_probs = tf.nn.softmax(sy_logits_na,dim=1)\n self.action_probs = action_probs\n sy_sampled_ac = tf.map_fn(lambda probs: tf.cast(tf.distributions.Categorical(probs=probs).sample(),\n tf.float32),\n action_probs,\n parallel_iterations=False)\n sy_sampled_ac = tf.cast(sy_sampled_ac,tf.int32)\n \n \n else:\n sy_mean, sy_logstd = policy_parameters\n # YOUR_HW2 CODE_HERE\n stds = tf.exp(sy_logstd)\n sy_sampled_ac = tf.map_fn(lambda mean: mean + tf.random.normal([self.ac_dim])*stds,sy_mean)\n return sy_sampled_ac\n\n def get_log_prob(self, policy_parameters, sy_ac_na):\n \"\"\" Constructs a symbolic operation for computing the log probability of a set of actions\n that were actually taken according to the policy\n\n arguments:\n policy_parameters\n if discrete: logits of a categorical distribution over actions \n sy_logits_na: (batch_size, self.ac_dim)\n if continuous: (mean, log_std) of a Gaussian distribution over actions\n sy_mean: (batch_size, self.ac_dim)\n sy_logstd: (self.ac_dim,)\n\n sy_ac_na: (batch_size, self.ac_dim)\n\n returns:\n sy_logprob_n: (batch_size)\n\n Hint:\n For the discrete case, use the log probability under a categorical distribution.\n For the continuous case, use the log probability under a multivariate gaussian.\n \"\"\"\n #raise NotImplementedError\n if self.discrete:\n sy_logits_na = policy_parameters\n # YOUR_HW2 CODE_HERE\n sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)\n else:\n sy_mean, sy_logstd = policy_parameters\n # YOUR_HW2 CODE_HERE\n std = tf.exp(sy_logstd)\n variance = (std*std)\n inverse_variance = 1/variance\n diff = sy_mean -sy_ac_na\n sy_logprob_n = -(tf.reduce_sum(tf.log(variance)) + tf.reduce_sum(inverse_variance *(diff*diff),axis=1))\n return sy_logprob_n\n\n def build_computation_graph(self):\n \"\"\"\n Notes on notation:\n \n Symbolic variables have the prefix sy_, to distinguish them from the numerical values\n that are computed later in the function\n \n Prefixes and suffixes:\n ob - observation \n ac - action\n _no - this tensor should have shape (batch self.size /n/, observation dim)\n _na - this tensor should have shape (batch self.size /n/, action dim)\n _n - this tensor should have shape (batch self.size /n/)\n \n Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis\n is None\n\n ----------------------------------------------------------------------------------\n loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate\n to get the policy gradient.\n \"\"\"\n self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()\n\n # The policy takes in an observation and produces a distribution over the action space\n self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)\n\n # We can sample actions from this action distribution.\n # This will be called in Agent.sample_trajectory() where we generate a rollout.\n self.sy_sampled_ac = self.sample_action(self.policy_parameters)\n\n # We can also compute the logprob of the actions that were actually taken by the policy\n # This is used in the loss function.\n self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)\n\n self.actor_loss = tf.reduce_sum(-self.sy_logprob_n * self.sy_adv_n)\n self.actor_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.actor_loss)\n\n # define the 
critic\n self.critic_prediction = tf.squeeze(build_mlp(\n self.sy_ob_no,\n 1,\n \"nn_critic\",\n n_layers=self.n_layers,\n size=self.size))\n self.sy_target_n = tf.placeholder(shape=[None], name=\"critic_target\", dtype=tf.float32)\n self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)\n self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)\n\n def sample_trajectories(self, itr, env):\n # Collect paths until we have enough timesteps\n timesteps_this_batch = 0\n paths = []\n while True:\n animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)\n path = self.sample_trajectory(env, animate_this_episode)\n paths.append(path)\n timesteps_this_batch += pathlength(path)\n if timesteps_this_batch > self.min_timesteps_per_batch:\n break\n return paths, timesteps_this_batch\n\n def sample_trajectory(self, env, animate_this_episode):\n ob = env.reset()\n obs, acs, rewards, next_obs, terminals = [], [], [], [], []\n steps = 0\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.1)\n obs.append(ob)\n \n ac = self.sess.run(self.sy_sampled_ac, {self.sy_ob_no: [ob]}) # YOUR HW2 CODE HERE\n\n ac = ac[0]\n acs.append(ac)\n ob, rew, done, _ = env.step(ac)\n # add the observation after taking a step to next_obs\n # YOUR CODE HERE\n next_obs.append(ob)\n rewards.append(rew)\n steps += 1\n # If the episode ended, the corresponding terminal value is 1\n # otherwise, it is 0\n # YOUR CODE HERE\n if done or steps > self.max_path_length:\n terminals.append(1)\n break\n else:\n terminals.append(0)\n path = {\"observation\" : np.array(obs, dtype=np.float32), \n \"reward\" : np.array(rewards, dtype=np.float32), \n \"action\" : np.array(acs, dtype=np.float32),\n \"next_observation\": np.array(next_obs, dtype=np.float32),\n \"terminal\": np.array(terminals, dtype=np.float32)}\n return path\n\n def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):\n \"\"\"\n Estimates the advantage function value for each timestep.\n\n let sum_of_path_lengths be the sum of the lengths of the paths sampled from \n Agent.sample_trajectories\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward\n re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing\n the reward for each timestep\n terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended\n at that timestep of 0 if the episode did not end\n\n returns:\n adv_n: shape: (sum_of_path_lengths). 
A single vector for the estimated \n advantages whose length is the sum of the lengths of the paths\n \"\"\"\n # First, estimate the Q value as Q(s, a) = r(s, a) + gamma*V(s')\n # To get the advantage, subtract the V(s) to get A(s, a) = Q(s, a) - V(s)\n # This requires calling the critic twice --- to obtain V(s') when calculating Q(s, a),\n # and V(s) when subtracting the baseline\n # Note: don't forget to use terminal_n to cut off the V(s') term when computing Q(s, a)\n # otherwise the values will grow without bound.\n # YOUR CODE HERE\n v_ob_no = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no})\n v_next_ob_no = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})\n v_next_ob_no = v_next_ob_no * (1 - terminal_n)\n \n y = re_n + self.gamma * v_next_ob_no\n \n adv_n = y - v_ob_no\n if self.normalize_advantages:\n #raise NotImplementedError\n adv_n = (adv_n - np.mean(adv_n))/np.std(adv_n+1e-8) # YOUR_HW2 CODE_HERE\n return adv_n\n\n def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):\n \"\"\"\n Update the parameters of the critic.\n\n let sum_of_path_lengths be the sum of the lengths of the paths sampled from\n Agent.sample_trajectories\n let num_paths be the number of paths sampled from Agent.sample_trajectories\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward\n re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing\n the reward for each timestep\n terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended\n at that timestep of 0 if the episode did not end\n\n returns:\n nothing\n \"\"\"\n # Use a bootstrapped target values to update the critic\n # Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')\n # In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps\n # Every self.num_grad_steps_per_target_update steps, recompute the target values\n # by evaluating V(s') on the updated critic\n # Note: don't forget to use terminal_n to cut off the V(s') term when computing the target\n\n # otherwise the values will grow without bound.\n # YOUR CODE HERE\n for _ in range(self.num_target_updates):\n v_next_ob_no = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})\n v_next_ob_no = v_next_ob_no * (1 - terminal_n)\n y = re_n + self.gamma * v_next_ob_no\n for _ in range(self.num_grad_steps_per_target_update):\n self.sess.run(self.critic_update_op, feed_dict={self.sy_target_n: y, self.sy_ob_no: ob_no})\n\n def update_actor(self, ob_no, ac_na, adv_n):\n \"\"\" \n Update the parameters of the policy.\n\n arguments:\n ob_no: shape: (sum_of_path_lengths, ob_dim)\n ac_na: shape: (sum_of_path_lengths).\n adv_n: shape: (sum_of_path_lengths). 
A single vector for the estimated\n advantages whose length is the sum of the lengths of the paths\n\n returns:\n nothing\n\n \"\"\"\n self.sess.run(self.actor_update_op,\n feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})\n\n\ndef train_AC(\n exp_name,\n env_name,\n n_iter, \n gamma, \n min_timesteps_per_batch, \n max_path_length,\n learning_rate,\n num_target_updates,\n num_grad_steps_per_target_update,\n animate, \n logdir, \n normalize_advantages,\n seed,\n n_layers,\n size):\n\n start = time.time()\n\n #========================================================================================#\n # Set Up Logger\n #========================================================================================#\n setup_logger(logdir, locals())\n\n #========================================================================================#\n # Set Up Env\n #========================================================================================#\n\n # Make the gym environment\n env = gym.make(env_name)\n\n # Set random seeds\n tf.set_random_seed(seed)\n np.random.seed(seed)\n env.seed(seed)\n\n # Maximum length for episodes\n max_path_length = max_path_length or env.spec.max_episode_steps\n\n # Is this env continuous, or self.discrete?\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n\n # Observation and action sizes\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n #========================================================================================#\n # Initialize Agent\n #========================================================================================#\n computation_graph_args = {\n 'n_layers': n_layers,\n 'ob_dim': ob_dim,\n 'ac_dim': ac_dim,\n 'discrete': discrete,\n 'size': size,\n 'learning_rate': learning_rate,\n 'num_target_updates': num_target_updates,\n 'num_grad_steps_per_target_update': num_grad_steps_per_target_update,\n }\n\n sample_trajectory_args = {\n 'animate': animate,\n 'max_path_length': max_path_length,\n 'min_timesteps_per_batch': min_timesteps_per_batch,\n }\n\n estimate_advantage_args = {\n 'gamma': gamma,\n 'normalize_advantages': normalize_advantages,\n }\n\n agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args) #estimate_return_args\n\n # build computation graph\n agent.build_computation_graph()\n\n # tensorflow: config, session, variable initialization\n agent.init_tf_sess()\n\n #========================================================================================#\n # Training Loop\n #========================================================================================#\n\n total_timesteps = 0\n for itr in range(n_iter):\n print(\"********** Iteration %i ************\"%itr)\n paths, timesteps_this_batch = agent.sample_trajectories(itr, env)\n total_timesteps += timesteps_this_batch\n\n # Build arrays for observation, action for the policy gradient update by concatenating \n # across paths\n ob_no = np.concatenate([path[\"observation\"] for path in paths])\n ac_na = np.concatenate([path[\"action\"] for path in paths])\n re_n = np.concatenate([path[\"reward\"] for path in paths])\n next_ob_no = np.concatenate([path[\"next_observation\"] for path in paths])\n terminal_n = np.concatenate([path[\"terminal\"] for path in paths])\n\n # Call tensorflow operations to:\n # (1) update the critic, by calling agent.update_critic\n # (2) use the updated critic to compute the advantage by, calling 
agent.estimate_advantage\n # (3) use the estimated advantage values to update the actor, by calling agent.update_actor\n # YOUR CODE HERE\n agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)\n adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)\n agent.update_actor(ob_no,ac_na,adv_n)\n #raise NotImplementedError\n\n # Log diagnostics\n returns = [path[\"reward\"].sum() for path in paths]\n ep_lengths = [pathlength(path) for path in paths]\n logz.log_tabular(\"Time\", time.time() - start)\n logz.log_tabular(\"Iteration\", itr)\n logz.log_tabular(\"AverageReturn\", np.mean(returns))\n logz.log_tabular(\"StdReturn\", np.std(returns))\n logz.log_tabular(\"MaxReturn\", np.max(returns))\n logz.log_tabular(\"MinReturn\", np.min(returns))\n logz.log_tabular(\"EpLenMean\", np.mean(ep_lengths))\n logz.log_tabular(\"EpLenStd\", np.std(ep_lengths))\n logz.log_tabular(\"TimestepsThisBatch\", timesteps_this_batch)\n logz.log_tabular(\"TimestepsSoFar\", total_timesteps)\n logz.dump_tabular()\n logz.pickle_tf_vars()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('env_name', type=str)\n parser.add_argument('--exp_name', type=str, default='vac')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--discount', type=float, default=1.0)\n parser.add_argument('--n_iter', '-n', type=int, default=100)\n parser.add_argument('--batch_size', '-b', type=int, default=1000)\n parser.add_argument('--ep_len', '-ep', type=float, default=-1.)\n parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)\n parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')\n parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)\n parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--n_experiments', '-e', type=int, default=1)\n parser.add_argument('--n_layers', '-l', type=int, default=2)\n parser.add_argument('--size', '-s', type=int, default=64)\n args = parser.parse_args()\n\n data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\n\n if not (os.path.exists(data_path)):\n os.makedirs(data_path)\n logdir = 'ac_' + args.exp_name + '_' + args.env_name + '_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join(data_path, logdir)\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n\n max_path_length = args.ep_len if args.ep_len > 0 else None\n\n processes = []\n\n for e in range(args.n_experiments):\n seed = args.seed + 10*e\n print('Running experiment with seed %d'%seed)\n\n def train_func():\n train_AC(\n exp_name=args.exp_name,\n env_name=args.env_name,\n n_iter=args.n_iter,\n gamma=args.discount,\n min_timesteps_per_batch=args.batch_size,\n max_path_length=max_path_length,\n learning_rate=args.learning_rate,\n num_target_updates=args.num_target_updates,\n num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,\n animate=args.render,\n logdir=os.path.join(logdir,'%d'%seed),\n normalize_advantages=not(args.dont_normalize_advantages),\n seed=seed,\n n_layers=args.n_layers,\n size=args.size\n )\n # # Awkward hacky process runs, because Tensorflow does not like\n # # repeatedly calling train_AC in the same thread.\n p = Process(target=train_func, args=tuple())\n p.start()\n processes.append(p)\n # if you comment in the line below, then the loop will block \n # until this process finishes\n # p.join()\n\n for p in 
processes:\n p.join()\n \n\nif __name__ == \"__main__\":\n main()\n", "import uuid\nimport time\nimport pickle\nimport sys\nimport gym.spaces\nimport itertools\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom collections import namedtuple\nfrom dqn_utils import *\n\nOptimizerSpec = namedtuple(\"OptimizerSpec\", [\"constructor\", \"kwargs\", \"lr_schedule\"])\n\n\nclass QLearner(object):\n\n def __init__(\n self,\n env,\n q_func,\n optimizer_spec,\n session,\n exploration=LinearSchedule(1000000, 0.1),\n stopping_criterion=None,\n replay_buffer_size=1000000,\n batch_size=32,\n gamma=0.99,\n learning_starts=50000,\n learning_freq=4,\n frame_history_len=4,\n target_update_freq=10000,\n grad_norm_clipping=10,\n rew_file=None,\n double_q=True,\n lander=False):\n \"\"\"Run Deep Q-learning algorithm.\n\n You can specify your own convnet using q_func.\n\n All schedules are w.r.t. total number of steps taken in the environment.\n\n Parameters\n ----------\n env: gym.Env\n gym environment to train on.\n q_func: function\n Model to use for computing the q function. It should accept the\n following named arguments:\n img_in: tf.Tensor\n tensorflow tensor representing the input image\n num_actions: int\n number of actions\n scope: str\n scope in which all the model related variables\n should be created\n reuse: bool\n whether previously created variables should be reused.\n optimizer_spec: OptimizerSpec\n Specifying the constructor and kwargs, as well as learning rate schedule\n for the optimizer\n session: tf.Session\n tensorflow session to use.\n exploration: rl_algs.deepq.utils.schedules.Schedule\n schedule for probability of chosing random action.\n stopping_criterion: (env, t) -> bool\n should return true when it's ok for the RL algorithm to stop.\n takes in env and the number of steps executed so far.\n replay_buffer_size: int\n How many memories to store in the replay buffer.\n batch_size: int\n How many transitions to sample each time experience is replayed.\n gamma: float\n Discount Factor\n learning_starts: int\n After how many environment steps to start replaying experiences\n learning_freq: int\n How many steps of environment to take between every experience replay\n frame_history_len: int\n How many past frames to include as input to the model.\n target_update_freq: int\n How many experience replay rounds (not steps!) to perform between\n each update to the target Q network\n grad_norm_clipping: float or None\n If not None gradients' norms are clipped to this value.\n double_q: bool\n If True, then use double Q-learning to compute target values. Otherwise, use vanilla DQN.\n https://papers.nips.cc/paper/3964-double-q-learning.pdf\n \"\"\"\n assert type(env.observation_space) == gym.spaces.Box\n assert type(env.action_space) == gym.spaces.Discrete\n\n self.target_update_freq = target_update_freq\n self.optimizer_spec = optimizer_spec\n self.batch_size = batch_size\n self.learning_freq = learning_freq\n self.learning_starts = learning_starts\n self.stopping_criterion = stopping_criterion\n self.env = env\n self.session = session\n self.exploration = exploration\n self.rew_file = str(uuid.uuid4()) + '.pkl' if rew_file is None else rew_file\n self.mean_rew_file = 'mean_rew'+str(uuid.uuid4()) + '.pkl' if rew_file is None else rew_file\n\n ###############\n # BUILD MODEL #\n ###############\n\n if len(self.env.observation_space.shape) == 1:\n # This means we are running on low-dimensional observations (e.g. 
RAM)\n input_shape = self.env.observation_space.shape\n else:\n img_h, img_w, img_c = self.env.observation_space.shape\n input_shape = (img_h, img_w, frame_history_len * img_c)\n self.num_actions = self.env.action_space.n\n\n # set up placeholders\n # placeholder for current observation (or state)\n self.obs_t_ph = tf.placeholder(\n tf.float32 if lander else tf.uint8, [None] + list(input_shape))\n # placeholder for current action\n self.act_t_ph = tf.placeholder(tf.int32, [None])\n # placeholder for current reward\n self.rew_t_ph = tf.placeholder(tf.float32, [None])\n # placeholder for next observation (or state)\n self.obs_tp1_ph = tf.placeholder(\n tf.float32 if lander else tf.uint8, [None] + list(input_shape))\n # placeholder for end of episode mask\n # this value is 1 if the next state corresponds to the end of an episode,\n # in which case there is no Q-value at the next state; at the end of an\n # episode, only the current state reward contributes to the target, not the\n # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)\n self.done_mask_ph = tf.placeholder(tf.float32, [None])\n\n # casting to float on GPU ensures lower data transfer times.\n if lander:\n obs_t_float = self.obs_t_ph\n obs_tp1_float = self.obs_tp1_ph\n else:\n obs_t_float = tf.cast(self.obs_t_ph, tf.float32) / 255.0\n obs_tp1_float = tf.cast(self.obs_tp1_ph, tf.float32) / 255.0\n\n # Here, you should fill in your own code to compute the Bellman error. This requires\n # evaluating the current and next Q-values and constructing the corresponding error.\n # TensorFlow will differentiate this error for you, you just need to pass it to the\n # optimizer. See assignment text for details.\n # Your code should produce one scalar-valued tensor: total_error\n # This will be passed to the optimizer in the provided code below.\n # Your code should also produce two collections of variables:\n # q_func_vars\n # target_q_func_vars\n # These should hold all of the variables of the Q-function network and target network,\n # respectively. 
A convenient way to get these is to make use of TF's \"scope\" feature.\n # For example, you can create your Q-function network with the scope \"q_func\" like this:\n # <something> = q_func(obs_t_float, num_actions, scope=\"q_func\", reuse=False)\n # And then you can obtain the variables like this:\n # q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')\n # Older versions of TensorFlow may require using \"VARIABLES\" instead of \"GLOBAL_VARIABLES\"\n # Tip: use huber_loss (from dqn_utils) instead of squared error when defining self.total_error\n ######\n\n # YOUR CODE HERE\n \n self.q = q_func(obs_t_float, self.num_actions, scope=\"q_func\", reuse=False)\n q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')\n self.max_action_for_qall = tf.argmax(self.q, axis=-1)\n self.target_q = q_func(obs_tp1_float, self.num_actions, scope=\"target_q_func\", reuse=False)\n self.double_q = double_q\n if double_q:\n self.max_action_index_for_dQ = tf.placeholder(tf.int32, [None])\n q = tf.squeeze(tf.batch_gather(self.target_q, tf.expand_dims(self.max_action_index_for_dQ,axis=1)))\n y = self.rew_t_ph + gamma * (1 - self.done_mask_ph) * q\n else:\n y = self.rew_t_ph + gamma * (1 - self.done_mask_ph) * tf.reduce_max(self.target_q, axis=-1)\n target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')\n\n self.total_error = tf.reduce_mean(\n huber_loss(tf.squeeze(tf.batch_gather(self.q, tf.expand_dims(self.act_t_ph, axis=1))) - y))\n ######\n\n # construct optimization op (with gradient clipping)\n self.learning_rate = tf.placeholder(tf.float32, (), name=\"learning_rate\")\n optimizer = self.optimizer_spec.constructor(learning_rate=self.learning_rate, **self.optimizer_spec.kwargs)\n self.train_fn = minimize_and_clip(optimizer, self.total_error,\n var_list=q_func_vars, clip_val=grad_norm_clipping)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_fn = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_fn.append(var_target.assign(var))\n self.update_target_fn = tf.group(*update_target_fn)\n\n # construct the replay buffer\n self.replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len, lander=lander)\n self.replay_buffer_idx = None\n\n ###############\n # RUN ENV #\n ###############\n self.model_initialized = False\n self.num_param_updates = 0\n self.mean_episode_reward = -float('nan')\n self.mean_episode_rewards = []\n self.best_mean_episode_reward = -float('inf')\n self.best_mean_episode_rewards = []\n self.mean_episode_t = []\n self.last_obs = self.env.reset()\n self.log_every_n_steps = 10000\n\n self.start_time = None\n self.t = 0\n\n def stopping_criterion_met(self):\n return self.stopping_criterion is not None and self.stopping_criterion(self.env, self.t)\n\n def step_env(self):\n ### 2. Step the env and store the transition\n # At this point, \"self.last_obs\" contains the latest observation that was\n # recorded from the simulator. Here, your code needs to store this\n # observation and its outcome (reward, next observation, etc.) 
into\n # the replay buffer while stepping the simulator forward one step.\n # At the end of this block of code, the simulator should have been\n # advanced one step, and the replay buffer should contain one more\n # transition.\n # Specifically, self.last_obs must point to the new latest observation.\n # Useful functions you'll need to call:\n # obs, reward, done, info = env.step(action)\n # this steps the environment forward one step\n # obs = env.reset()\n # this resets the environment if you reached an episode boundary.\n # Don't forget to call env.reset() to get a new observation if done\n # is true!!\n # Note that you cannot use \"self.last_obs\" directly as input\n # into your network, since it needs to be processed to include context\n # from previous frames. You should check out the replay buffer\n # implementation in dqn_utils.py to see what functionality the replay\n # buffer exposes. The replay buffer has a function called\n # encode_recent_observation that will take the latest observation\n # that you pushed into the buffer and compute the corresponding\n # input that should be given to a Q network by appending some\n # previous frames.\n # Don't forget to include epsilon greedy exploration!\n # And remember that the first time you enter this loop, the model\n # may not yet have been initialized (but of course, the first step\n # might as well be random, since you haven't trained your net...)\n\n #####\n\n # YOUR CODE HERE\n self.replay_buffer_idx = self.replay_buffer.next_idx\n self.replay_buffer.store_frame(self.last_obs)\n if not self.model_initialized:\n act = self.env.action_space.sample()\n else:\n if self.exploration.value(self.t) > np.random.sample():\n act = self.env.action_space.sample()\n # print(act)\n else:\n state = self.replay_buffer.encode_recent_observation()\n values = self.session.run(self.q, {\n self.obs_t_ph: state[np.newaxis, ...]})\n act = np.argmax(values)\n # print(values)\n obs, reward, done, info = self.env.step(action=act)\n self.replay_buffer.store_effect(\n idx=self.replay_buffer_idx, action=act, reward=reward, done=done)\n if done:\n # print('DONE')\n obs = self.env.reset()\n self.last_obs = obs\n\n def update_model(self):\n ### 3. Perform experience replay and train the network.\n # note that this is only done if the replay buffer contains enough samples\n # for us to learn something useful -- until then, the model will not be\n # initialized and random actions should be taken\n if (self.t > self.learning_starts and \\\n self.t % self.learning_freq == 0 and \\\n self.replay_buffer.can_sample(self.batch_size)):\n # Here, you should perform training. Training consists of four steps:\n # 3.a: use the replay buffer to sample a batch of transitions (see the\n # replay buffer code for function definition, each batch that you sample\n # should consist of current observations, current actions, rewards,\n # next observations, and done indicator).\n # 3.b: initialize the model if it has not been initialized yet; to do\n # that, call\n # initialize_interdependent_variables(self.session, tf.global_variables(), {\n # self.obs_t_ph: obs_t_batch,\n # self.obs_tp1_ph: obs_tp1_batch,\n # })\n # where obs_t_batch and obs_tp1_batch are the batches of observations at\n # the current and next time step. The boolean variable model_initialized\n # indicates whether or not the model has been initialized.\n # Remember that you have to update the target network too (see 3.d)!\n # 3.c: train the model. 
To do this, you'll need to use the self.train_fn and\n # self.total_error ops that were created earlier: self.total_error is what you\n # created to compute the total Bellman error in a batch, and self.train_fn\n # will actually perform a gradient step and update the network parameters\n # to reduce total_error. When calling self.session.run on these you'll need to\n # populate the following placeholders:\n # self.obs_t_ph\n # self.act_t_ph\n # self.rew_t_ph\n # self.obs_tp1_ph\n # self.done_mask_ph\n # (this is needed for computing self.total_error)\n # self.learning_rate -- you can get this from self.optimizer_spec.lr_schedule.value(t)\n # (this is needed by the optimizer to choose the learning rate)\n # 3.d: periodically update the target network by calling\n # self.session.run(self.update_target_fn)\n # you should update every target_update_freq steps, and you may find the\n # variable self.num_param_updates useful for this (it was initialized to 0)\n #####\n\n # YOUR CODE HERE\n obs_batch, act_batch, rew_batch, next_obs_batch, done_mask = self.replay_buffer.sample(self.batch_size)\n if not self.model_initialized:\n initialize_interdependent_variables(self.session, tf.global_variables(), {\n self.obs_t_ph: obs_batch,\n self.obs_tp1_ph: next_obs_batch,\n })\n self.session.run(self.update_target_fn)\n self.model_initialized = True\n # 3.c\n if self.double_q:\n # double q case, firstly calculate self.max_action_index_for_dQ's value\n max_action_index_for_dQ = self.session.run(self.max_action_for_qall, feed_dict={\n self.obs_t_ph: next_obs_batch\n })\n self.session.run([self.train_fn, self.total_error], feed_dict={\n self.obs_t_ph: obs_batch,\n self.act_t_ph: act_batch,\n self.rew_t_ph: rew_batch,\n self.obs_tp1_ph: next_obs_batch,\n self.max_action_index_for_dQ: max_action_index_for_dQ,\n self.done_mask_ph: done_mask,\n self.learning_rate: self.optimizer_spec.lr_schedule.value(self.t)\n })\n else:\n self.session.run([self.train_fn, self.total_error], {\n self.obs_t_ph: obs_batch,\n self.act_t_ph: act_batch,\n self.rew_t_ph: rew_batch,\n self.obs_tp1_ph: next_obs_batch,\n self.done_mask_ph: done_mask,\n self.learning_rate: self.optimizer_spec.lr_schedule.value(self.t)\n })\n if (self.num_param_updates % self.target_update_freq) == 0:\n self.session.run(self.update_target_fn)\n\n self.num_param_updates += 1\n\n self.t += 1\n\n def log_progress(self):\n episode_rewards = get_wrapper_by_name(self.env, \"Monitor\").get_episode_rewards()\n\n if len(episode_rewards) > 0:\n self.mean_episode_reward = np.mean(episode_rewards[-100:])\n\n if len(episode_rewards) > 100:\n self.best_mean_episode_reward = max(self.best_mean_episode_reward, self.mean_episode_reward)\n if self.t % self.log_every_n_steps == 0 and self.model_initialized:\n self.mean_episode_rewards.append(self.mean_episode_reward)\n self.best_mean_episode_rewards.append(self.best_mean_episode_reward)\n self.mean_episode_t.append(self.t)\n print(\"Timestep %d\" % (self.t,))\n print(\"mean reward (100 episodes) %f\" % self.mean_episode_reward)\n print(\"best mean reward %f\" % self.best_mean_episode_reward)\n print(\"episodes %d\" % len(episode_rewards))\n print(\"exploration %f\" % self.exploration.value(self.t))\n print(\"learning_rate %f\" % self.optimizer_spec.lr_schedule.value(self.t))\n if self.start_time is not None:\n print(\"running time %f\" % ((time.time() - self.start_time) / 60.))\n\n self.start_time = time.time()\n\n sys.stdout.flush()\n\n with open(self.mean_rew_file, 'wb') as f:\n pickle.dump({\n 'mean_rewards': 
self.mean_episode_rewards,\n 'best_mean_rewards': self.best_mean_episode_rewards,\n 't': self.mean_episode_t\n }, f, pickle.HIGHEST_PROTOCOL)\n\n with open(self.rew_file, 'wb') as f:\n pickle.dump(episode_rewards, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef learn(*args, **kwargs):\n alg = QLearner(*args, **kwargs)\n while not alg.stopping_criterion_met():\n alg.step_env()\n # at this point, the environment should have been advanced one step (and\n # reset if done was true), and self.last_obs should point to the new latest\n # observation\n alg.update_model()\n alg.log_progress()\n" ]
[ [ "tensorflow.exp", "numpy.min", "numpy.mean", "tensorflow.losses.mean_squared_error", "tensorflow.nn.softmax", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.set_random_seed", "numpy.concatenate", "numpy.max", "numpy.log", "tensorflow.ConfigProto", "tensorflow.variable_scope", "tensorflow.layers.dense", "numpy.array", "tensorflow.train.AdamOptimizer", "tensorflow.Session", "numpy.std", "tensorflow.log", "tensorflow.placeholder", "tensorflow.reduce_sum", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.random.seed", "tensorflow.random.normal", "tensorflow.distributions.Categorical" ], [ "tensorflow.expand_dims", "tensorflow.group", "tensorflow.argmax", "tensorflow.global_variables", "numpy.mean", "tensorflow.reduce_max", "tensorflow.placeholder", "numpy.argmax", "numpy.random.sample", "tensorflow.get_collection", "tensorflow.cast" ] ]
loganlebanoff/datasets
[ "44649ac4f8fefdbaae0a66918b03ae7dd8169f1e" ]
[ "tensorflow_datasets/core/utils/gcs_utils.py" ]
[ "# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Utilities for accessing TFDS GCS buckets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport concurrent.futures\nimport os\nimport posixpath\nfrom typing import List, Optional\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_datasets.core.utils import py_utils\nfrom tensorflow_datasets.core.utils import tqdm_utils\n\nGCS_ROOT_DIR = 'gs://tfds-data'\n\n# for dataset_info/\nGCS_DATASET_INFO_DIR = 'dataset_info'\nGCS_DATASETS_DIR = 'datasets'\n\n\n# TODO(tfds): On windows, gs:// isn't supported.\n# https://github.com/tensorflow/tensorflow/issues/38477\n_is_gcs_disabled = (os.name == 'nt')\n\n\ndef gcs_path(suffix: Optional[str] = None) -> str:\n \"\"\"Returns the GCS URI path.\n\n Args:\n suffix: Eventual relative path in the bucket. If `None`, returns the root\n GCS bucket uri.\n\n Returns:\n path: The GCS uri.\n \"\"\"\n path = GCS_ROOT_DIR\n if suffix:\n path = posixpath.join(path, suffix)\n return path\n\n\n@py_utils.memoize()\ndef gcs_listdir(dir_name: str) -> Optional[List[str]]:\n \"\"\"List all files in the given GCS dir (`['dataset/1.0.0/file0', ...]`).\"\"\"\n root_dir = gcs_path(dir_name)\n if _is_gcs_disabled or not tf.io.gfile.exists(root_dir):\n return None\n return [posixpath.join(dir_name, f) for f in tf.io.gfile.listdir(root_dir)]\n\n\ndef gcs_dataset_info_files(dataset_dir: str) -> Optional[List[str]]:\n \"\"\"Return paths to GCS files in the given dataset directory.\"\"\"\n return gcs_listdir(posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir))\n\n\ndef is_dataset_on_gcs(dataset_name: str) -> bool:\n \"\"\"If the dataset is available on the GCS bucket gs://tfds-data/datasets.\"\"\"\n dir_name = posixpath.join(GCS_DATASETS_DIR, dataset_name)\n return not _is_gcs_disabled and tf.io.gfile.exists(gcs_path(dir_name))\n\n\ndef download_gcs_dataset(\n dataset_name, local_dataset_dir, max_simultaneous_downloads=25\n):\n \"\"\"Downloads prepared GCS dataset to local dataset directory.\"\"\"\n if _is_gcs_disabled:\n raise AssertionError('Cannot download from GCS when _is_gcs_disabled')\n\n prefix = posixpath.join(GCS_DATASETS_DIR, dataset_name)\n gcs_paths_to_dl = gcs_listdir(prefix)\n\n # Filter out the diffs folder if present\n filter_prefix = posixpath.join(prefix, 'diffs')\n gcs_paths_to_dl = [\n p for p in gcs_paths_to_dl if not p.startswith(filter_prefix)\n ]\n\n with tqdm_utils.async_tqdm(\n total=len(gcs_paths_to_dl), desc='Dl Completed...', unit=' file') as pbar:\n\n def _copy_from_gcs(gcs_path_):\n # Copy 'gs://tfds-data/datasets/ds/1.0.0/file' -> `local_dir/file`\n tf.io.gfile.copy(\n gcs_path(gcs_path_),\n os.path.join(local_dataset_dir, posixpath.basename(gcs_path_)),\n )\n pbar.update(1)\n\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=max_simultaneous_downloads) as executor:\n futures = [\n executor.submit(_copy_from_gcs, path) 
for path in gcs_paths_to_dl\n ]\n for future in concurrent.futures.as_completed(futures):\n future.result()\n" ]
[ [ "tensorflow.compat.v2.io.gfile.listdir", "tensorflow.compat.v2.io.gfile.exists" ] ]
YifanShenSZ/pytorch
[ "b4232f7cbe407909f9d95b91304c73fdc4c66a50", "b4232f7cbe407909f9d95b91304c73fdc4c66a50", "b4232f7cbe407909f9d95b91304c73fdc4c66a50" ]
[ "test/ao/sparsity/test_sparsifier.py", "torch/fx/passes/splitter_base.py", "test/jit/test_schema_check.py" ]
[ "# -*- coding: utf-8 -*-\n# Owner(s): [\"module: unknown\"]\n\nimport itertools\nimport logging\nimport re\n\nimport torch\nfrom torch import nn\nfrom torch.ao.sparsity import BaseSparsifier, WeightNormSparsifier, FakeSparsity, NearlyDiagonalSparsifier\nfrom torch.nn.utils.parametrize import is_parametrized\n\nfrom torch.testing._internal.common_utils import TestCase\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(16, 16)\n )\n self.linear = nn.Linear(16, 16)\n self.head = nn.Linear(16, 4)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n x = self.head(x)\n return x\n\n\nclass ImplementedSparsifier(BaseSparsifier):\n def __init__(self, **kwargs):\n super().__init__(defaults=kwargs)\n\n def update_mask(self, module, **kwargs):\n module.parametrizations.weight[0].mask[0] = 0\n linear_state = self.state['linear.weight']\n linear_state['step_count'] = linear_state.get('step_count', 0) + 1\n\n\nclass TestBaseSparsifier(TestCase):\n def test_constructor(self):\n # Cannot instantiate the abstract base\n self.assertRaises(TypeError, BaseSparsifier)\n # Can instantiate the model with no configs\n model = Model()\n sparsifier = ImplementedSparsifier(test=3)\n sparsifier.prepare(model, config=None)\n assert len(sparsifier.groups) == 3\n sparsifier.step()\n # Can instantiate the model with configs\n sparsifier = ImplementedSparsifier(test=3)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])\n assert len(sparsifier.groups) == 1\n assert sparsifier.groups[0]['tensor_fqn'] == 'linear.weight'\n assert 'test' in sparsifier.groups[0]\n assert sparsifier.groups[0]['test'] == 3\n\n def test_prepare_config(self):\n model = Model()\n sparsifier = ImplementedSparsifier(test=3)\n # Make sure there are no parametrizations before `prepare`\n assert not hasattr(model.seq[0], 'parametrizations')\n assert not hasattr(model.linear, 'parametrizations')\n assert not hasattr(model.head, 'parametrizations')\n sparsifier.prepare(model, config=[\n {'tensor_fqn': 'seq.0.weight', 'test': 42},\n # No 'linear' to make sure it will be skipped in the sparsification\n {'tensor_fqn': 'head.weight'}\n ])\n assert len(sparsifier.groups) == 2\n # Check if default argument is not assigned if explicit\n assert sparsifier.groups[0]['tensor_fqn'] == 'seq.0.weight'\n assert sparsifier.groups[0]['test'] == 42\n # Check if FQN and module are pointing to the same location\n assert sparsifier.groups[1]['tensor_fqn'] == 'head.weight'\n assert sparsifier.groups[1]['module'] == model.head\n # Check if parameterizations are attached\n assert hasattr(model.seq[0], 'parametrizations')\n assert not hasattr(model.linear, 'parametrizations')\n assert hasattr(model.head, 'parametrizations')\n\n def test_step(self):\n model = Model()\n sparsifier = ImplementedSparsifier(test=3)\n sparsifier.enable_mask_update = True\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])\n sparsifier.step()\n assert torch.all(model.linear.parametrizations.weight[0].mask[0] == 0)\n\n def test_state_dict(self):\n step_count = 3\n model0 = Model()\n sparsifier0 = ImplementedSparsifier(test=3)\n sparsifier0.prepare(model0, [{'tensor_fqn': 'linear.weight'}])\n mask = model0.linear.parametrizations['weight'][0].mask\n mask.data = torch.arange(mask.shape[0] * mask.shape[1]).reshape(mask.shape)\n for step in range(step_count):\n sparsifier0.step()\n state_dict = 
sparsifier0.state_dict()\n\n # Check the expected keys in the state_dict\n assert 'state' in state_dict\n assert 'step_count' in state_dict['state']['linear.weight']\n assert state_dict['state']['linear.weight']['step_count'] == 3\n assert 'groups' in state_dict\n assert 'test' in state_dict['groups'][0]\n assert 'tensor_fqn' in state_dict['groups'][0]\n assert state_dict['groups'][0]['tensor_fqn'] == 'linear.weight'\n\n # Check loading static_dict creates an equivalent model\n model1 = Model()\n sparsifier1 = ImplementedSparsifier()\n sparsifier1.prepare(model1, None)\n\n assert sparsifier0.state != sparsifier1.state\n\n # Make sure the masks are different in the beginning\n for mg in sparsifier0.groups:\n if mg['tensor_fqn'] == 'linear.weight':\n mask0 = mg['module'].parametrizations.weight[0].mask\n for mg in sparsifier1.groups:\n if mg['tensor_fqn'] == 'linear.weight':\n mask1 = mg['module'].parametrizations.weight[0].mask\n self.assertNotEqual(mask0, mask1)\n\n sparsifier1.load_state_dict(state_dict)\n\n # Make sure the states are loaded, and are correct\n assert sparsifier0.state == sparsifier1.state\n\n # Make sure the masks (and all dicts) are the same after loading\n assert len(sparsifier0.groups) == len(sparsifier1.groups)\n for idx in range(len(sparsifier0.groups)):\n mg0 = sparsifier0.groups[idx]\n mg1 = sparsifier1.groups[idx]\n for key in mg0.keys():\n assert key in mg1\n if key == 'module':\n # We cannot compare modules as they are different\n param0 = mg0[key].parametrizations.weight[0]\n param1 = mg1[key].parametrizations.weight[0]\n assert hasattr(param0, 'mask')\n assert hasattr(param1, 'mask')\n self.assertEqual(param0.__dict__, param1.__dict__)\n else:\n assert mg0[key] == mg1[key]\n\n def test_mask_squash(self):\n model = Model()\n sparsifier = ImplementedSparsifier(test=3)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}])\n assert hasattr(model.linear.parametrizations.weight[0], 'mask')\n assert is_parametrized(model.linear, 'weight')\n assert not is_parametrized(model.seq[0], 'weight')\n\n sparsifier.squash_mask()\n assert not is_parametrized(model.seq[0], 'weight')\n assert not is_parametrized(model.linear, 'weight')\n\n def test_mask_squash_with_params1(self):\n model = Model()\n sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])\n sparsifier.squash_mask(\n params_to_keep_per_layer={\n 'linear': ('foo', 'bar'),\n 'seq.0': ('baz',)\n })\n assert not is_parametrized(model.seq[0], 'weight')\n assert not is_parametrized(model.linear, 'weight')\n assert hasattr(model.seq[0], 'sparse_params')\n assert hasattr(model.linear, 'sparse_params')\n assert model.seq[0].sparse_params.get('foo', None) is None\n assert model.seq[0].sparse_params.get('bar', None) is None\n assert model.seq[0].sparse_params.get('baz', None) == 1\n assert model.linear.sparse_params.get('foo', None) == 3\n assert model.linear.sparse_params.get('bar', None) == 2\n assert model.linear.sparse_params.get('baz', None) is None\n\n def test_mask_squash_with_params2(self):\n model = Model()\n sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])\n sparsifier.squash_mask(params_to_keep=('foo', 'bar'))\n assert not is_parametrized(model.seq[0], 'weight')\n assert not is_parametrized(model.linear, 'weight')\n assert hasattr(model.seq[0], 'sparse_params')\n assert hasattr(model.linear, 'sparse_params')\n 
assert model.seq[0].sparse_params.get('foo', None) == 3\n assert model.seq[0].sparse_params.get('bar', None) == 2\n assert model.seq[0].sparse_params.get('baz', None) is None\n assert model.linear.sparse_params.get('foo', None) == 3\n assert model.linear.sparse_params.get('bar', None) == 2\n assert model.linear.sparse_params.get('baz', None) is None\n\n def test_mask_squash_with_params3(self):\n model = Model()\n sparsifier = ImplementedSparsifier(foo=3, bar=2, baz=1)\n sparsifier.prepare(model, [{'tensor_fqn': 'linear.weight'}, {'tensor_fqn': 'seq.0.weight'}])\n sparsifier.squash_mask(\n params_to_keep=('foo', 'bar'),\n params_to_keep_per_layer={'seq.0': ('baz',)})\n assert not is_parametrized(model.seq[0], 'weight')\n assert not is_parametrized(model.linear, 'weight')\n assert hasattr(model.seq[0], 'sparse_params')\n assert hasattr(model.linear, 'sparse_params')\n assert model.seq[0].sparse_params.get('foo', None) == 3\n assert model.seq[0].sparse_params.get('bar', None) == 2\n assert model.seq[0].sparse_params.get('baz', None) == 1\n assert model.linear.sparse_params.get('foo', None) == 3\n assert model.linear.sparse_params.get('bar', None) == 2\n assert model.linear.sparse_params.get('baz', None) is None\n\n\nclass TestWeightNormSparsifier(TestCase):\n def test_constructor(self):\n model = Model()\n sparsifier = WeightNormSparsifier()\n sparsifier.prepare(model, config=None)\n for g in sparsifier.groups:\n assert isinstance(g['module'], nn.Linear)\n # The groups are unordered\n assert g['module_fqn'] in ('seq.0', 'linear', 'head')\n\n def test_step(self):\n model = Model()\n sparsifier = WeightNormSparsifier(sparsity_level=0.5)\n sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])\n for g in sparsifier.groups:\n # Before step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) == 0 # checking sparsity level is 0\n sparsifier.enable_mask_update = True\n sparsifier.step()\n self.assertAlmostEqual(model.linear.parametrizations['weight'][0].mask.mean().item(), 0.5, places=2)\n for g in sparsifier.groups:\n # After step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level has increased\n # Test if the mask collapses to all zeros if the weights are randomized\n iters_before_collapse = 1000\n for _ in range(iters_before_collapse):\n model.linear.weight.data = torch.randn(model.linear.weight.shape)\n sparsifier.step()\n for g in sparsifier.groups:\n # After step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level did not collapse\n\n def test_step_2_of_4(self):\n model = Model()\n sparsifier = WeightNormSparsifier(sparsity_level=1.0,\n sparse_block_shape=(1, 4),\n zeros_per_block=2)\n sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])\n sparsifier.step()\n # make sure the sparsity level is approximately 50%\n self.assertAlmostEqual(model.linear.parametrizations['weight'][0].mask.mean().item(), 0.5, places=2)\n # Make sure each block has exactly 50% zeros\n module = sparsifier.groups[0]['module']\n mask = module.parametrizations['weight'][0].mask\n for row in mask:\n for idx in range(0, len(row), 4):\n block = row[idx:idx + 4]\n block, _ = block.sort()\n assert (block[:2] == 0).all()\n assert (block[2:] != 0).all()\n\n def test_prepare(self):\n model = Model()\n sparsifier = WeightNormSparsifier()\n sparsifier.prepare(model, config=None)\n for g in sparsifier.groups:\n module = g['module']\n # 
Check mask exists\n assert hasattr(module.parametrizations['weight'][0], 'mask')\n # Check parametrization exists and is correct\n assert is_parametrized(module, 'weight')\n assert type(module.parametrizations.weight[0]) == FakeSparsity\n\n def test_mask_squash(self):\n model = Model()\n sparsifier = WeightNormSparsifier()\n sparsifier.prepare(model, config=None)\n sparsifier.squash_mask()\n for g in sparsifier.groups:\n module = g['module']\n assert not is_parametrized(module, 'weight')\n assert not hasattr(module, 'mask')\n\n def test_sparsity_levels(self):\n sparsity_levels = [-1.0, 0.0, 0.5, 1.0, 2.0]\n sparse_block_shapes = [(1, 1), (1, 4), (2, 2), (4, 1)]\n zeros_per_blocks = [0, 1, 2, 3, 4]\n\n testcases = itertools.tee(itertools.product(sparsity_levels,\n sparse_block_shapes,\n zeros_per_blocks))\n # Create a config and model with all the testcases\n model = nn.Sequential()\n sparsifier = WeightNormSparsifier()\n\n sparsity_per_layer_config = []\n p = re.compile(r'[-\\.\\s]')\n for sl, sbs, zpb in testcases[0]:\n # Make sure the number of zeros is not > values in a block\n if zpb > sbs[0] * sbs[1]:\n continue\n layer_name = f'{sl}_{sbs}_{zpb}'\n layer_name = p.sub('_', layer_name)\n\n layer = nn.Linear(12, 12, bias=False)\n layer.weight = nn.Parameter(torch.ones(12, 12))\n model.add_module(layer_name, layer)\n config = {\n 'tensor_fqn': layer_name + \".weight\",\n 'sparsity_level': sl,\n 'sparse_block_shape': sbs,\n 'zeros_per_block': zpb\n }\n sparsity_per_layer_config.append(config)\n\n sparsifier.prepare(model, sparsity_per_layer_config)\n sparsifier.step()\n sparsifier.squash_mask()\n model.eval()\n\n for sl, sbs, zpb in testcases[1]:\n if zpb > sbs[0] * sbs[1]:\n continue\n layer_name = f'{sl}_{sbs}_{zpb}'\n layer_name = p.sub('_', layer_name)\n layer = getattr(model, layer_name)\n\n # Level of sparsity is achieved\n sparse_mask = (layer.weight == 0).float()\n if zpb == 0:\n assert sparse_mask.mean() == 0\n else:\n # Ratio of individual zeros in the tensor\n true_sl = min(max(sl, 0.0), 1.0)\n true_sl = true_sl * zpb / sbs[0] / sbs[1]\n assert sparse_mask.mean() == true_sl\n\n\nclass TestNearlyDiagonalSparsifier(TestCase):\n def test_constructor(self):\n model = Model()\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n sparsifier.prepare(model, config=None)\n for g in sparsifier.groups:\n assert isinstance(g['module'], nn.Linear)\n # The groups are unordered\n assert g['module_fqn'] in ('seq.0', 'linear', 'head')\n\n def test_step(self):\n model = Model()\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n sparsifier.prepare(model, config=[{'tensor_fqn': 'linear.weight'}])\n\n for g in sparsifier.groups:\n # Before step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) == 0 # checking sparsity level is 0\n\n sparsifier.enable_mask_update = True\n sparsifier.step()\n mask = module.parametrizations['weight'][0].mask\n height, width = mask.shape\n assert torch.all(mask == torch.eye(height, width))\n\n for g in sparsifier.groups:\n # After step\n module = g['module']\n assert (1.0 - module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level has increased\n\n # Test if the mask collapses to all zeros if the weights are randomized\n iters_before_collapse = 1000\n for _ in range(iters_before_collapse):\n model.linear.weight.data = torch.randn(model.linear.weight.shape)\n sparsifier.step()\n for g in sparsifier.groups:\n # After step\n module = g['module']\n assert (1.0 - 
module.parametrizations['weight'][0].mask.mean()) > 0 # checking sparsity level did not collapse\n\n def test_prepare(self):\n model = Model()\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n sparsifier.prepare(model, config=None)\n for g in sparsifier.groups:\n module = g['module']\n # Check mask exists\n assert hasattr(module.parametrizations['weight'][0], 'mask')\n # Check parametrization exists and is correct\n assert is_parametrized(module, 'weight')\n assert type(module.parametrizations.weight[0]) == FakeSparsity\n\n def test_mask_squash(self):\n model = Model()\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n sparsifier.prepare(model, config=None)\n sparsifier.step()\n sparsifier.squash_mask()\n for g in sparsifier.groups:\n module = g['module']\n assert not is_parametrized(module, 'weight')\n assert not hasattr(module, 'mask')\n weights = module.weight\n height, width = weights.shape\n assert torch.all(weights == torch.eye(height, width) * weights) # only diagonal to be present\n\n def test_sparsity_levels(self):\n nearliness_levels = list(nearliness for nearliness in range(-1, 100))\n model = nn.Sequential()\n\n p = re.compile(r'[-\\.\\s]')\n for nearliness in nearliness_levels:\n sparsifier = NearlyDiagonalSparsifier(nearliness=1)\n layer_name = f'{nearliness}'\n layer_name = p.sub('_', layer_name)\n\n layer = nn.Linear(32, 32, bias=False)\n layer.weight = nn.Parameter(torch.ones(32, 32))\n width, height = layer.weight.shape\n model.add_module(layer_name, layer)\n config = {\n 'tensor_fqn': layer_name + \".weight\",\n 'nearliness': nearliness\n }\n\n sparsifier.prepare(model, [config])\n # should raise a ValueError when nearliness arg is illegal\n if (nearliness > 0 and nearliness % 2 == 0) or (nearliness // 2 >= min(width, height)):\n with self.assertRaises(ValueError):\n sparsifier.step()\n else:\n sparsifier.step()\n sparsifier.squash_mask()\n model.eval()\n\n layer = getattr(model, layer_name)\n # verify that mask created corresponds to the nearliness\n self._verify_nearliness(layer.weight, nearliness)\n\n # helper function to verify nearliness of a mask\n def _verify_nearliness(self, mask: torch.Tensor, nearliness: int):\n if nearliness <= 0:\n assert torch.all(mask == torch.zeros(mask.shape[0], mask.shape[1]))\n else:\n height, width = mask.shape\n dist_to_diagonal = nearliness // 2\n for row in range(0, height):\n for col in range(0, width):\n if abs(row - col) <= dist_to_diagonal:\n assert mask[row, col] == 1\n else:\n assert mask[row, col] == 0\n", "import argparse\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom typing import NamedTuple, Sequence, Iterable, Any, List, Dict, Optional, Tuple\nimport logging\n\nimport torch\nfrom torch.fx.passes.graph_manipulation import get_size_of_node\nfrom torch.fx.node import map_arg\nfrom torch.fx._compatibility import compatibility\n\nfrom .operator_support import (\n get_node_target,\n OperatorSupportBase,\n)\nfrom .graph_drawer import FxGraphDrawer\nfrom .shape_prop import ShapeProp\nfrom .split_utils import split_by_tags\nfrom .tools_common import (\n FxNetAccFusionsFinder,\n CALLABLE_NODE_OPS,\n Tensors,\n NodeList,\n NodeSet,\n is_node_output_tensor,\n)\nimport warnings\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass _SplitterSettingBase:\n def __init__(self):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--min_acc_module_size\",\n default=1,\n type=int,\n help=\"Minimum size limit of an accelerator subgraph.\",\n )\n parser.add_argument(\n \"--skip_fusion\",\n 
default=False,\n action=\"store_true\",\n help=\"If true then no fusion groups. Fusion group is used to \"\n \"enforce no non-tensor data flow between submodules. If we don't \"\n \"have this constrain, setting this to false is recommended as it \"\n \"can reduce overhead.\",\n )\n parser.add_argument(\n \"--allow_non_tensor\",\n default=False,\n action=\"store_true\",\n help=\"For some backends non-tensor data flow between cpu and them \"\n \"are not allowed. Therefore, if a node supported by accelerator but \"\n \"it has non-tensor inputs or outputs to a cpu node we would want to \"\n \"consider it as a cpu node during splitting. However, for some backends \"\n \"we might not care about non-tensor data flow and we can set this option \"\n \"to true to disable the functionality that prevent non-tensor data flow.\",\n )\n args, unknown = parser.parse_known_args()\n\n self.min_acc_module_size: int = args.min_acc_module_size\n self.skip_fusion: bool = args.skip_fusion\n self.allow_non_tensor: bool = args.allow_non_tensor\n\n\n@compatibility(is_backward_compatible=False)\nclass FxNetAccNodesFinder:\n \"\"\"\n Finds a set of nodes that can be supported on ACC, excluding nodes that have non-tensor\n input/output to cpu nodes to prevent non-tensor data flow between backends and cpu.\n\n I.e. if we have a chain:\n\n ACC_NODE_1 -> ACC_NODE_2 -> ACC_NODE_3 -> CPU_NODE_1\n\n where every ACC node produces non-tensor output, then they all should be treated as CPU nodes.\n\n This behavior can be turned off by passing allow_non_tensor=True.\n \"\"\"\n\n def __init__(\n self,\n module: torch.fx.GraphModule,\n operator_support: OperatorSupportBase,\n allow_non_tensor: bool,\n ):\n self.module = module\n self.operator_support = operator_support\n self.allow_non_tensor = allow_non_tensor\n\n def reduce_acc_nodes_non_tensor_input_helper(\n self, cpu_worklist: NodeList\n ):\n \"\"\"\n Transitively excludes nodes from ACC supported set.\n For every node in the worklist:\n - removes its downstream ACC nodes from ACC supported set,\n - if any downstream ACC node produces non-tensor output,\n then it gets added into the worklist.\n \"\"\"\n while cpu_worklist:\n node = cpu_worklist.pop(0)\n\n for user in node.users:\n if user in self.acc_nodes:\n self.acc_nodes.remove(user)\n if not is_node_output_tensor(user):\n cpu_worklist.append(user)\n\n def reduce_acc_nodes_non_tensor_input(self):\n \"\"\"\n Excludes nodes from ACC supported set that have direct\n upstream CPU nodes that produce non-tensor outputs.\n \"\"\"\n non_tensor_cpu_nodes: NodeList = []\n\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n if node in self.acc_nodes:\n continue\n if is_node_output_tensor(node):\n continue\n non_tensor_cpu_nodes.append(node)\n\n self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes)\n\n def reduce_acc_nodes_non_tensor_output(self):\n \"\"\"\n Excludes nodes from ACC supported set that produce non-tensor\n outputs and have downstream CPU nodes.\n \"\"\"\n while True:\n new_cpu_nodes: NodeList = []\n\n for acc_node in self.acc_nodes:\n if is_node_output_tensor(acc_node):\n continue\n for user in acc_node.users:\n if user not in self.acc_nodes:\n new_cpu_nodes.append(acc_node)\n break\n\n if not new_cpu_nodes:\n break\n\n for new_cpu_node in new_cpu_nodes:\n self.acc_nodes.remove(new_cpu_node)\n\n self.reduce_acc_nodes_non_tensor_input_helper(new_cpu_nodes)\n\n def __call__(self) -> NodeSet:\n submodules = dict(self.module.named_modules())\n self.acc_nodes = {\n n\n for 
n in self.module.graph.nodes\n if n.op in CALLABLE_NODE_OPS\n and self.operator_support.is_node_supported(submodules, n)\n }\n\n if not self.allow_non_tensor:\n self.reduce_acc_nodes_non_tensor_input()\n self.reduce_acc_nodes_non_tensor_output()\n\n return self.acc_nodes\n\n@compatibility(is_backward_compatible=False)\nclass FxNetSplitterInternalError(Exception):\n pass\n\n@compatibility(is_backward_compatible=False)\n@dataclass\nclass Subgraph:\n is_acc: bool\n nodes: NodeList\n\n\n@compatibility(is_backward_compatible=False)\nclass SplitResult(NamedTuple):\n \"\"\"\n Stores the results of the splitter.\n\n Attributes:\n split_module: root module after splitting.\n submodule_inputs: a dict that maps submodule name to its inputs.\n non_acc_submodule_prefix: the prefix for non acc submodules. For\n acc submodule the prefix is alwasy \"_run_on_acc_\".\n \"\"\"\n\n split_module: torch.fx.GraphModule\n submodule_inputs: Dict[str, Any]\n non_acc_submodule_prefix: str\n\n\n@compatibility(is_backward_compatible=False)\ndef generate_inputs_for_submodules(\n model: torch.nn.Module,\n inputs: Sequence[Any],\n target_submodules: Iterable[str]\n) -> Dict[str, Any]:\n \"\"\"\n Generate inputs for targeting submdoules in the given model. Note that if two submodules refer to the same obj, this\n function doesn't work.\n\n Args:\n model: root model.\n inputs: inputs to the root model.\n target_submodules: submodules that we want to generate inputs for.\n\n Returns:\n A dict that maps from submodule name to its inputs.\n \"\"\"\n\n handles = []\n results = {}\n submodule_to_names = dict((mod, name) for name, mod in model.named_modules())\n\n def pre_forward(module, module_inputs):\n results[submodule_to_names[module]] = module_inputs\n try:\n for name, mod in model.named_modules():\n if name in target_submodules:\n handles.append(mod.register_forward_pre_hook(pre_forward))\n model(*inputs)\n except Exception as e:\n warnings.warn(f\"Failed to generate submodule inputs because of the following error:\\n{e}\")\n finally:\n for h in handles:\n h.remove()\n return results\n\n\nclass _SplitterBase:\n \"\"\"\n Splits a GraphModule into sub-GraphModules for execution on CPU or the accelerator.\n Output is a GraphModule with supported and unsupported operators grouped into as few sub-GraphModules as possible.\n Assumes that only \"call_module\", \"call_function\" and \"call_method\" from FX IR can potentially be executed on the accelerator.\n\n Given the following graph:\n ==> b ==>\n // \\\\\n a d\n \\\\ //\n ==> c ==>\n\n class SimpleModule(torch.nn.Module):\n def forward(self, a):\n b = torch.sin(a)\n c = torch.cos(a)\n d = b + c\n return d\n\n and providing \"operator_support\" that indicates that 'b' and 'c' can be executed on the accelerator,\n we will get the following split result:\n\n main:\n def forward(self, a):\n run_on_acc_0_0 = self._run_on_acc_0_0(a)\n getitem = run_on_acc_0_0[0]\n getitem_1 = run_on_acc_0_0[1]\n run_on_cpu_1_1 = self._run_on_cpu_1_1(getitem, getitem_1)\n return run_on_cpu_1_1\n\n _run_on_acc_0_0:\n def forward(self, a):\n sin_1 = torch.sin(a)\n cos_1 = torch.cos(a)\n return (sin_1, cos_1)\n\n _run_on_cpu_1_1:\n def forward(self, sin_1, cos_1):\n add_1 = sin_1 + cos_1\n return add_1\n \"\"\"\n\n # PCIe bandwidth for the backend, default to 100 GB/s\n PCIe_BW = 100 * 2 ** 30\n\n def __init__(\n self,\n module: torch.fx.GraphModule,\n sample_input: Sequence[Any],\n operator_support: OperatorSupportBase,\n settings: _SplitterSettingBase,\n non_acc_submodule_name: str = 
\"_run_on_cpu_\",\n ):\n \"\"\"\n Preprocesses graph before splitting:\n - finds nodes supported by ACC,\n - finds fusion groups for ACC nodes having non-tensor IO,\n - builds a graph of direct dependencies,\n - builds a map of fused nodes to their fusions.\n As a result we get self.acc_nodes, self.deps and self.fusions.\n \"\"\"\n assert isinstance(module, torch.fx.GraphModule)\n\n self.module = module\n ShapeProp(self.module).propagate(*sample_input)\n\n self.settings = settings\n self.operator_support = operator_support\n self.sample_input = sample_input\n self.acc_nodes = FxNetAccNodesFinder(self.module, self.operator_support, self.settings.allow_non_tensor)()\n\n if self.settings.skip_fusion:\n self.fusions = {}\n else:\n self.fusions = FxNetAccFusionsFinder(module, self.acc_nodes)()\n\n # Modify deps to add more deps for fused nodes\n self.deps = self.find_deps()\n self.update_deps_for_fusions()\n\n self.non_acc_submodule_name = non_acc_submodule_name\n\n # ===============================================================\n # Helpers for ctor and initial state\n # ===============================================================\n\n def find_deps(self) -> Dict[torch.fx.Node, NodeSet]:\n \"\"\"\n Builds a graph of node dependencies. Leaf nodes don't have any\n dependencies and the \"output\" node doesn't have nodes depending on it.\n\n Resulting graph has only direct dependencies, i.e. there are no\n transitive dependencies.\n \"\"\"\n deps: Dict[torch.fx.Node, NodeSet] = defaultdict(set)\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n\n for user in node.users:\n if user.op != \"output\":\n deps[user].add(node)\n return deps\n\n def update_deps_for_fusions(self):\n \"\"\"\n Updates graph of dependencies so that:\n - nodes from the same fusion depend on the same set of outer nodes,\n - outer nodes depending on a fusion depend on all nodes in that fusion.\n \"\"\"\n for node in self.fusions:\n fusion = self.fusions[node]\n for fused_neighbor in fusion:\n self.deps[node].update(self.deps[fused_neighbor] - fusion)\n\n for user in fused_neighbor.users:\n if user not in fusion:\n self.deps[user].add(node)\n\n # ===============================================================\n # Helpers for preview\n # ===============================================================\n\n def _lower_model_to_backend(\n self, mod: torch.fx.GraphModule, inputs: Tensors\n ) -> torch.nn.Module:\n \"\"\"\n Lower the model to a backend.\n \"\"\"\n\n return mod\n\n def _find_culprit(\n self, mod: torch.fx.GraphModule, inputs: Tensors\n ) -> str:\n \"\"\"\n When an error occurs during lowering or running the lowered mod, we use this\n function to find culprits in the `mod` that causes the error.\n \"\"\"\n\n return \"Unable to find a culprit because _find_culprit() function is not implemented.\"\n\n def _draw_graph_based_on_node_support(\n self, mod: torch.fx.GraphModule, supported_nodes: NodeList\n ):\n color_map = {\n \"default\": \"AliceBlue\",\n \"supported\": \"chartreuse1\",\n \"unsupported\": \"crimson\",\n }\n\n class CustomDrawer(FxGraphDrawer):\n def _get_node_style(self, node):\n template = super()._get_node_style(node)\n if node in supported_nodes:\n template[\"fillcolor\"] = color_map[\"supported\"]\n elif node.op in CALLABLE_NODE_OPS:\n template[\"fillcolor\"] = color_map[\"unsupported\"]\n else:\n template[\"fillcolor\"] = color_map[\"default\"]\n\n return template\n\n drawer = CustomDrawer(mod, \"node_support\", ignore_getattr=True)\n dot_graph = 
drawer.get_main_dot_graph()\n dot_graph.write_raw(\"node_support.dot\")\n\n def node_support_preview(self, dump_graph: bool = False):\n submodules = dict(self.module.named_modules())\n\n supported_nodes: NodeList = []\n supported_node_types = defaultdict(set)\n unsupported_node_types = defaultdict(set)\n\n def get_dtype(arg):\n tensor_meta = arg.meta.get(\"tensor_meta\")\n return getattr(tensor_meta, \"dtype\", None)\n\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n\n target = get_node_target(submodules, node)\n\n # Store dtype of arg in node.args. If arg doesn't have dtype, i.e. not a tensor, we'll store None.\n arg_dtypes = [\n get_dtype(arg) if isinstance(arg, torch.fx.Node) else None\n for arg in node.args\n ]\n\n # Find last non-None element. If all elements are None, return max_len.\n last_index = len(arg_dtypes) - next(\n (\n i\n for i, dtype in enumerate(reversed(arg_dtypes))\n if dtype is not None\n ),\n len(arg_dtypes),\n )\n\n # Strip None elements at the end.\n arg_dtypes_tuple = tuple(arg_dtypes[:last_index])\n kwarg_dtypes_tuple = tuple(\n (k, get_dtype(arg))\n for k, arg in node.kwargs.items()\n if isinstance(arg, torch.fx.Node)\n )\n\n if self.operator_support.is_node_supported(submodules, node):\n supported_nodes.append(node)\n supported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))\n else:\n unsupported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))\n\n if dump_graph:\n self._draw_graph_based_on_node_support(self.module, supported_nodes)\n\n reports = \"\\nSupported node types in the model:\\n\"\n for t, dtypes in supported_node_types.items():\n for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:\n reports += f\"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\\n\"\n\n reports += \"\\nUnsupported node types in the model:\\n\"\n for t, dtypes in unsupported_node_types.items():\n for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:\n reports += f\"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\\n\"\n\n print(reports)\n\n # Return reports for testing purpose\n return reports\n\n def split_preview(self, dump_graph: bool = False):\n reports = \"\"\n subgraphs = self.put_nodes_into_subgraphs()\n acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])\n cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num\n reports += f\"Before removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:\"\n reports += f\" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\\n\"\n\n subgraphs = self.remove_small_acc_subgraphs(subgraphs)\n acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])\n cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num\n reports += f\"After removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:\"\n reports += f\" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\\n\"\n\n for i, subgraph in enumerate(subgraphs):\n reports += f\"_run_on_acc_{i}: \" if subgraph.is_acc else f\"{self.non_acc_submodule_name}{i}: \"\n reports += f\"{len(subgraph.nodes)} node(s)\\n\"\n\n self.tag(subgraphs)\n split_mod = self.split(remove_tag=True)\n split_mod.eval()\n\n if dump_graph:\n drawer = FxGraphDrawer(\n split_mod, \"preview\", ignore_getattr=True\n )\n dot_graphs = drawer.get_all_dot_graphs()\n for name, dot_graph in dot_graphs.items():\n dot_graph.write_raw(f\"{name}.dot\")\n\n max_qps: float = self.PCIe_BW\n bottleneck_module = \"\"\n\n for node in split_mod.graph.nodes:\n if node.op == \"call_module\" 
and \"acc\" in node.target:\n reports += f\"\\nProcessing acc submodule {node.target}\\n\"\n\n submod = getattr(split_mod, node.target)\n\n def get_submod_inputs(main_mod, submod, example_inputs):\n sub_inputs = None\n\n def get_inputs(self, inputs):\n nonlocal sub_inputs\n sub_inputs = inputs\n\n handle = submod.register_forward_pre_hook(get_inputs)\n main_mod(*example_inputs)\n handle.remove()\n return sub_inputs\n\n submod_inputs = get_submod_inputs(\n split_mod, submod, self.sample_input\n )\n ShapeProp(submod).propagate(*submod_inputs)\n\n total_input_bytes = 0\n total_output_bytes = 0\n\n reports += \"Checking inputs...\\n\"\n for n in submod.graph.nodes:\n if n.op == \"placeholder\":\n if not is_node_output_tensor(n):\n reports += f\"Input {n.name} is not a tensor, this might cause problems during lowering!\\n\"\n else:\n total_input_bytes += get_size_of_node(submod, n)[0]\n if n.op == \"output\":\n output_node = n\n\n reports += \"Checking outputs...\\n\"\n\n def get_bytes(node: torch.fx.Node):\n nonlocal total_output_bytes\n nonlocal reports\n if not is_node_output_tensor(node):\n reports += f\"Output {node.name} is not a tensor, this might cause problems during lowering!\\n\"\n else:\n total_output_bytes += get_size_of_node(submod, node)[0]\n\n map_arg(output_node.args, get_bytes)\n qps = self.PCIe_BW / max(total_input_bytes, total_output_bytes)\n reports += f\"Total input size in bytes is {total_input_bytes}, total output size in bytes is {total_output_bytes},\"\n reports += f\" theoretical max qps (bounds by PCIe bandwidth) for this submodule is {qps}.\\n\"\n\n if qps < max_qps:\n max_qps = qps\n bottleneck_module = node.target\n\n try:\n lowered_submod = self._lower_model_to_backend(submod, submod_inputs)\n except RuntimeError:\n reports += \"Run into an error during lowering!\\n\"\n reports += self._find_culprit(submod, submod_inputs)\n continue\n\n try:\n lowered_submod(*submod_inputs)\n except RuntimeError:\n reports += \"Run into an error during inference!\\n\"\n reports += self._find_culprit(submod, submod_inputs)\n else:\n reports += \"Lowering and running succeed!\\n\"\n\n reports += f\"\\nTheoretical max qps (bounds by PCIe bandwidth) for this model is {max_qps},\"\n reports += f\" bottleneck is submodule {bottleneck_module}.\"\n print(reports)\n\n # return the reports for testing purposes\n return reports\n\n # ===============================================================\n # Helpers for extend_acc_subgraph() method\n # ===============================================================\n\n def find_reverse_deps(\n self, tag_id: Optional[int] = None\n ) -> Dict[torch.fx.Node, NodeSet]:\n \"\"\"\n Builds reversed topological node dependencies, if tag_id is specified,\n we ignore nodes that are in later subgraph i.e. 
nodes have greater tag_id.\n \"\"\"\n result: Dict[torch.fx.Node, NodeSet] = defaultdict(set)\n\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n\n for user in node.users:\n if user.op not in CALLABLE_NODE_OPS:\n continue\n\n if tag_id is None or (int(user.tag.split(\"_\")[-1]) < tag_id):\n result[node].add(user)\n\n return result\n\n def update_reverse_deps_for_fusions(\n self, deps: Dict[torch.fx.Node, NodeSet]\n ):\n processed_node = set()\n\n for node, fusion in self.fusions.items():\n if node in processed_node:\n continue\n\n new_dep = set()\n\n # Create a new dependency set which include all the\n # dependencies of the nodes in the fusion group\n for n in fusion:\n new_dep.update(deps[n])\n\n # Exclude nodes in the fusion\n new_dep.difference_update(fusion)\n\n # Update dependency\n for n in fusion:\n deps[n] = new_dep\n\n for arg in n.all_input_nodes:\n if arg not in fusion:\n deps[arg].update(fusion)\n\n processed_node.add(n)\n\n def find_parent_nodes_of_subgraph(self, tag: str) -> NodeSet:\n \"\"\"\n Finds parent nodes of the `tag` subgraph.\n\n Traverse the inputs of nodes in the subgraph, if input doesn't belong to the subgraph\n and is not a placeholder, we consider it as the parent node of the subgraph.\n \"\"\"\n parent_nodes = set()\n\n for node in self.module.graph.nodes:\n if node.op in CALLABLE_NODE_OPS and node.tag == tag:\n for arg in node.all_input_nodes:\n if arg.op in CALLABLE_NODE_OPS and arg.tag != tag:\n parent_nodes.add(arg)\n\n return parent_nodes\n\n def extend_acc_subgraph(self, tag: str):\n \"\"\"\n Extend the acc subgraph with `tag` going the reversed topological direction.\n \"\"\"\n # Dict that maps node to its users and ignore users that\n # are in the subgraph that has greater tag\n deps = self.find_reverse_deps(tag_id=int(tag.split(\"_\")[-1]))\n self.update_reverse_deps_for_fusions(deps)\n\n # Parent nodes of the subgraph\n parent_nodes = self.find_parent_nodes_of_subgraph(tag)\n\n visited_nodes: NodeSet = set()\n\n while parent_nodes:\n node = None\n\n # Find a acc node that depends on visited nodes only\n for n in parent_nodes:\n if deps[n] <= visited_nodes and n in self.acc_nodes:\n node = n\n break\n\n if node is None:\n break\n\n # Put the node into `tag` subgraph\n node.tag = tag # type: ignore[attr-defined]\n parent_nodes.remove(node)\n visited_nodes.add(node)\n\n # If node is in a fusion group, add all fusion buddies to parent nodes\n if node in self.fusions:\n for fusion_node in self.fusions[node]:\n if fusion_node not in visited_nodes:\n parent_nodes.add(fusion_node)\n\n # Add inputs of the node to parent nodes\n for arg in node.all_input_nodes:\n if arg.op in CALLABLE_NODE_OPS and arg not in visited_nodes:\n parent_nodes.add(arg)\n\n # ===============================================================\n # Helpers for split() method\n # ===============================================================\n\n def starter_nodes(self) -> Tuple[NodeSet, NodeSet]:\n \"\"\"\n Finds nodes that consume module inputs or get_attr nodes.\n \"\"\"\n starter_cpu_nodes: NodeSet = set()\n starter_acc_nodes: NodeSet = set()\n for node in self.module.graph.nodes:\n if node.op not in {\"placeholder\", \"get_attr\"}:\n continue\n for user in node.users:\n if user in self.acc_nodes:\n starter_acc_nodes.add(user)\n else:\n starter_cpu_nodes.add(user)\n return starter_cpu_nodes, starter_acc_nodes\n\n def put_nodes_into_subgraphs(self) -> List[Subgraph]:\n # We start graph traversal from leaf nodes\n current_cpu_nodes, 
current_acc_nodes = self.starter_nodes()\n visited_nodes: NodeSet = set()\n\n # Determine which subgraph to start from based on node dependency\n acc_subgraph: bool = True\n for n in current_cpu_nodes:\n if self.deps[n] <= visited_nodes:\n acc_subgraph = False\n break\n\n current_subgraph_nodes: NodeList = []\n\n # Result accumulator\n subgraphs: List[Subgraph] = []\n while current_cpu_nodes or current_acc_nodes:\n # Find the first node that should belong to the current subgraph and has all dependencies resolved\n current_nodes = current_acc_nodes if acc_subgraph else current_cpu_nodes\n node = next(\n (n for n in current_nodes if self.deps[n] <= visited_nodes),\n None,\n )\n\n # If nothing was found, then it's time to flip the mode and start a new subgraph\n if node is None:\n if not current_subgraph_nodes:\n raise FxNetSplitterInternalError(\"Subgraph can't be empty\")\n\n subgraphs.append(\n Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)\n )\n acc_subgraph = not acc_subgraph\n current_subgraph_nodes = []\n continue\n\n current_nodes.remove(node)\n visited_nodes.add(node)\n current_subgraph_nodes.append(node)\n\n # Add fusion buddies\n if node in self.fusions:\n if node in self.acc_nodes:\n current_acc_nodes.update(self.fusions[node] - visited_nodes)\n else:\n current_cpu_nodes.update(self.fusions[node] - visited_nodes)\n\n # Put depending nodes into the queue\n for user in node.users:\n if user.op not in CALLABLE_NODE_OPS:\n continue\n\n # Add downstream nodes\n if user in self.acc_nodes:\n current_acc_nodes.add(user)\n else:\n current_cpu_nodes.add(user)\n\n # Check if the last subgraph was not created\n if current_subgraph_nodes:\n subgraphs.append(\n Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)\n )\n\n if not subgraphs:\n raise FxNetSplitterInternalError(\"Couldn't create subgraphs\")\n\n return subgraphs\n\n def remove_small_acc_subgraphs(self, subgraphs: List[Subgraph]) -> List[Subgraph]:\n \"\"\"\n This pass finds ACC submodules with less than specified size and merges\n them with adjacent CPU submodules.\n \"\"\"\n result: List[Subgraph] = []\n for subgraph in subgraphs:\n if subgraph.is_acc:\n if len(subgraph.nodes) >= self.settings.min_acc_module_size:\n result.append(subgraph)\n else:\n print(\n \"Eliminating acc subgraph because it's smaller than the threshold: \"\n f\"{len(subgraph.nodes)} < {self.settings.min_acc_module_size}\"\n )\n if result:\n result[-1].nodes.extend(subgraph.nodes)\n else:\n subgraph.is_acc = False\n result.append(subgraph)\n else:\n if result and not result[-1].is_acc:\n result[-1].nodes.extend(subgraph.nodes)\n else:\n result.append(subgraph)\n return result\n\n def tag(self, subgraphs: List[Subgraph]):\n self.tags: List[str] = []\n for subgraph in subgraphs:\n subgraph_name = self.non_acc_submodule_name\n\n tag = f\"_run_on_acc_{len(self.tags)}\" if subgraph.is_acc else f\"{self.non_acc_submodule_name}{len(self.tags)}\"\n self.tags.append(tag)\n for node in subgraph.nodes:\n if hasattr(node, \"tag\"):\n raise FxNetSplitterInternalError(f\"Node {node} was already tagged\")\n node.tag = tag # type: ignore[attr-defined]\n\n def split(self, remove_tag: bool = False) -> torch.fx.GraphModule:\n split_module = split_by_tags(self.module, self.tags)\n if remove_tag:\n for node in self.module.graph.nodes:\n if hasattr(node, \"tag\"):\n del node.tag\n return split_module\n\n def __call__(self) -> torch.fx.GraphModule:\n subgraphs = self.put_nodes_into_subgraphs()\n subgraphs = self.remove_small_acc_subgraphs(subgraphs)\n 
acc_subgraphs_count = len([s for s in subgraphs if s.is_acc])\n non_acc_subgraphs_count = len(subgraphs) - acc_subgraphs_count\n print(f\"Got {acc_subgraphs_count} acc subgraphs and {non_acc_subgraphs_count} non-acc subgraphs\")\n self.tag(subgraphs)\n return self.split()\n\n def generate_split_results(self) -> SplitResult:\n split_module = self()\n submodule_names = []\n for name, mod in split_module.named_children():\n submodule_names.append(name)\n submodule_inputs = generate_inputs_for_submodules(split_module, self.sample_input, submodule_names)\n return SplitResult(split_module, submodule_inputs, self.non_acc_submodule_name)\n", "# Owner(s): [\"oncall: jit\"]\n\nimport os\nimport sys\nimport torch\nfrom torch.utils._pytree import tree_map\n\n\nfrom torch.testing._internal.schema_check_mode import SchemaCheckMode\nfrom torch.utils._python_dispatch import enable_torch_dispatch_mode\nfrom torch.testing._internal.jit_utils import JitTestCase\n\npytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(pytorch_test_dir)\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_jit.py TESTNAME\\n\\n\"\n \"instead.\")\n\n# This TorchDispatchTensor Subclass is used to simulate an incorrect schema\n# which is then used to test that SchemaCheckMode behaves as expected\n\nclass IncorrectAliasTensor(torch.Tensor):\n INCORRECT_OPS = {\"aten::add\", \"aten::sub\"}\n\n elem: torch.Tensor\n\n __slots__ = ['elem']\n\n __torch_function__ = torch._C._disabled_torch_function_impl\n\n @staticmethod\n def __new__(cls, elem, *args, **kwargs):\n # The wrapping tensor (IncorrectAliasTensor) shouldn't hold any\n # memory for the class in question, but it should still\n # advertise the same device as before\n r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]\n cls, elem.size(),\n strides=elem.stride(), storage_offset=elem.storage_offset(),\n # TODO: clone storage aliasing\n dtype=elem.dtype, layout=elem.layout,\n device=elem.device, requires_grad=kwargs.get(\"requires_grad\", False)\n )\n # ...the real tensor is held as an element on the tensor.\n r.elem = elem.detach() if r.requires_grad else elem\n return r\n\n def __repr__(self):\n return super().__repr__(tensor_contents=f\"{self.elem}\")\n\n @classmethod\n def __torch_dispatch__(cls, func, types, args=(), kwargs=None):\n def unwrap(e):\n return e.elem if isinstance(e, cls) else e\n\n def wrap(e):\n return cls(e) if isinstance(e, torch.Tensor) else e\n unwrapped_args = tree_map(unwrap, args)\n out = func(*unwrapped_args, **tree_map(unwrap, kwargs))\n if func._schema.name in IncorrectAliasTensor.INCORRECT_OPS:\n args[0].elem = out\n\n return tree_map(wrap, out)\n\n# Tests various schema checking functionalities.\nclass TestSchemaCheck(JitTestCase):\n # Tests that SchemaCheckMode records operator order with grad\n def test_schema_check_mode_operator_order(self):\n schema_check = SchemaCheckMode()\n with enable_torch_dispatch_mode(schema_check):\n x = torch.rand((3, 3), requires_grad=True)\n x.relu().sin()\n self.assertEqual([\"aten::rand\", \"aten::relu\", \"aten::sin\"], schema_check.ops)\n\n # Tests that SchemaCheckMode records operator order without grad\n def test_schema_check_tensor_operator_order_without_grad(self):\n schema_check = SchemaCheckMode()\n with enable_torch_dispatch_mode(schema_check):\n x = torch.rand((3, 3), requires_grad=False)\n x.relu().sin()\n self.assertEqual([\"aten::rand\", \"aten::relu\", 
\"aten::sin\"], schema_check.ops)\n\n # Tests that SchemaCheckMode wraps torch.Tensor\n def test_schema_check_tensor_functionality(self):\n x = torch.rand((3, 3), requires_grad=True)\n expected = x.relu().sin()\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual = x.relu().sin()\n self.assertEqual(expected, actual)\n\n # Tests that SchemaCheckMode wraps torch.Tensor when an argument's default is overriden\n def test_schema_check_tensor_functionality_default_replaced(self):\n x = torch.rand((3, 3), requires_grad=True)\n expected = x.add(x, alpha=2)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual = x.add(x, alpha=2)\n self.assertEqual(expected, actual)\n\n # Tests that SchemaCheckMode wraps torch.Tensor when there is a Tensor[] argument\n def test_schema_check_tensor_functionality_list_input(self):\n a = torch.rand((3, 3))\n b = torch.rand((3, 3))\n c = torch.rand((3, 3))\n expected = torch.linalg.multi_dot([a, b, c])\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual = torch.linalg.multi_dot([a, b, c])\n self.assertEqual(expected, actual)\n\n # Tests that SchemaCheckMode wraps torch.Tensor when there is a kwarg tensor input\n def test_schema_check_tensor_functionality_kwarg_tensor(self):\n x = torch.rand((3, 5))\n w = torch.rand((4))\n expected = torch.stft(x, 4, win_length=4, window=w, return_complex=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual = torch.stft(x, 4, win_length=4, window=w, return_complex=True)\n self.assertEqual(expected, actual)\n\n # Tests that SchemaCheckMode wraps torch.Tensor with a mutable op\n def test_schema_check_tensor_functionality_mutable_inputs(self):\n expected = torch.rand((3, 3), requires_grad=False)\n actual = torch.clone(expected)\n expected.sinh_()\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual.sinh_()\n self.assertEqual(expected, actual)\n\n # Tests that an exception is raised for a mismatching mutation\n def test_mutation_check_fail(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument running_mean is not defined as mutable but was mutated\"):\n x = torch.rand((3, 3), requires_grad=True)\n batch = torch.nn.BatchNorm1d(3, track_running_stats=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n batch(x)\n\n # Tests that an exception is raised for a mismatching mutation over multiple ops\n def test_mutation_check_fail_multiple_operators(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument running_mean is not defined as mutable but was mutated\"):\n x = torch.rand((3, 3), requires_grad=True)\n batch = torch.nn.BatchNorm1d(3, track_running_stats=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n x = x.sinh()\n x = x.tanh()\n x = x.relu()\n batch(x)\n\n # Tests that an exception is raised for a mismatching alias\n def test_alias_check_fail(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument input is not defined to alias output but was aliasing\"):\n x = torch.rand((3, 3), requires_grad=True)\n y = torch.zeros((3, 3))\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n IncorrectAliasTensor(x).add(IncorrectAliasTensor(y), alpha=2)\n\n # Tests that an exception is raised for a mismatching alias over multiple ops\n def test_alias_check_fail_multiple_operators(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument input is not defined to alias output but was aliasing\"):\n x = torch.rand((3, 3), requires_grad=True)\n y = torch.zeros((3, 3), requires_grad=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n 
IncorrectAliasTensor(x).sin().relu().add(IncorrectAliasTensor(y), alpha=2)\n\n # Tests that an exception is raised for a centered mismatching alias over multiple ops\n def test_alias_check_fail_multiple_operators_centered(self):\n with self.assertRaisesRegex(RuntimeError, \"Argument input is not defined to alias output but was aliasing\"):\n x = torch.rand((3, 3), requires_grad=True)\n y = torch.zeros((3, 3), requires_grad=True)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n IncorrectAliasTensor(x).sin().add(IncorrectAliasTensor(y), alpha=2).relu()\n\n # Tests that SchemaCheckMode wraps Torch.tensor when inputs alias\n def test_alias_check_with_aliasing_inputs(self):\n expected = torch.rand((3, 3))\n x = expected\n actual = torch.clone(expected)\n y = actual\n expected.add_(x)\n with enable_torch_dispatch_mode(SchemaCheckMode()):\n actual.add_(y)\n self.assertEqual(expected, actual)\n\n # Tests that isAliasOf returns as expected\n def test_is_alias_of(self):\n x = torch.rand((3, 3), requires_grad=True)\n y = torch.rand((3, 3), requires_grad=True)\n y = x.add(x, alpha=2)\n self.assertTrue(torch._C._is_alias_of(x, x))\n self.assertFalse(torch._C._is_alias_of(x, y))\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.ao.sparsity.WeightNormSparsifier", "torch.arange", "torch.nn.Sequential", "torch.ao.sparsity.NearlyDiagonalSparsifier", "torch.ones", "torch.nn.utils.parametrize.is_parametrized", "torch.all", "torch.eye", "torch.randn" ], [ "torch.fx.node.map_arg", "torch.fx._compatibility.compatibility", "torch.fx.passes.graph_manipulation.get_size_of_node" ], [ "torch.zeros", "torch.rand", "torch.utils._python_dispatch.enable_torch_dispatch_mode", "torch._C._is_alias_of", "torch.linalg.multi_dot", "torch.utils._pytree.tree_map", "torch.nn.BatchNorm1d", "torch.clone", "torch.testing._internal.schema_check_mode.SchemaCheckMode", "torch.stft" ] ]
switchablenorms/SwitchNorm_Detection
[ "ab6848667bc8976367fdacb4b8ebbaeefdc79bd6" ]
[ "lib/core/test_engine.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"Test a Detectron network on an imdb (image database).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nimport cv2\nimport datetime\nimport logging\nimport numpy as np\nimport os\nimport yaml\n\nimport torch\nimport torch.nn as nn\n\nfrom core.config import cfg\n# from core.rpn_generator import generate_rpn_on_dataset #TODO: for rpn only case\n# from core.rpn_generator import generate_rpn_on_range\nfrom core.test import im_detect_all\nfrom core.test import im_conv_body_only\nfrom datasets import task_evaluation\nfrom datasets.json_dataset import JsonDataset\nfrom modeling import model_builder\nimport nn as mynn\nfrom utils.detectron_weight_helper import load_detectron_weight\nimport utils.env as envu\nimport utils.net as net_utils\nimport utils.subprocess as subprocess_utils\nimport utils.vis as vis_utils\nfrom utils.io import save_object\nfrom utils.timer import Timer\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_eval_functions():\n # Determine which parent or child function should handle inference\n if cfg.MODEL.RPN_ONLY:\n raise NotImplementedError\n # child_func = generate_rpn_on_range\n # parent_func = generate_rpn_on_dataset\n else:\n # Generic case that handles all network types other than RPN-only nets\n # and RetinaNet\n child_func = test_net\n parent_func = test_net_on_dataset\n\n return parent_func, child_func\n\n\ndef get_inference_dataset(index, datasets, is_parent=True):\n assert is_parent or len(cfg.TEST.DATASETS) == 1, \\\n 'The child inference process can only work on a single dataset'\n\n dataset_name = datasets\n\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert is_parent or len(cfg.TEST.PROPOSAL_FILES) == 1, \\\n 'The child inference process can only work on a single proposal file'\n assert len(cfg.TEST.PROPOSAL_FILES) == len(cfg.TEST.DATASETS), \\\n 'If proposals are used, one proposal file must be specified for ' \\\n 'each dataset'\n proposal_file = cfg.TEST.PROPOSAL_FILES[index]\n else:\n proposal_file = None\n\n return dataset_name, proposal_file\n\n\ndef run_inference(\n args, ind_range=None,\n multi_gpu_testing=False, gpu_id=0,\n check_expected_results=False,\n tb_logger=None,\n cur_iter=-1):\n global tblogger\n global curiter\n tblogger = tb_logger\n curiter = cur_iter\n\n parent_func, child_func = get_eval_functions()\n is_parent = ind_range is None\n\n def result_getter():\n if is_parent:\n # Parent case:\n # In this case we're either running inference on the entire dataset in a\n # single process or (if multi_gpu_testing is True) using this process to\n # launch subprocesses that each run inference on a range of the dataset\n all_results = {}\n for i in range(len(cfg.TEST.DATASETS)):\n dataset_name, proposal_file = 
get_inference_dataset(i, cfg.TEST.DATASETS[i])\n output_dir = args.output_dir\n results = parent_func(\n args,\n dataset_name,\n proposal_file,\n output_dir,\n multi_gpu=multi_gpu_testing\n )\n all_results.update(results)\n\n return all_results\n else:\n # Subprocess child case:\n # In this case test_net was called via subprocess.Popen to execute on a\n # range of inputs on a single dataset\n dataset_name, proposal_file = \\\n get_inference_dataset(0, cfg.TEST.DATASETS[0], is_parent=False)\n output_dir = args.output_dir\n return child_func(\n args,\n dataset_name,\n proposal_file,\n output_dir,\n ind_range=ind_range,\n gpu_id=gpu_id\n )\n\n all_results = result_getter()\n if check_expected_results and is_parent:\n task_evaluation.check_expected_results(\n all_results,\n atol=cfg.EXPECTED_RESULTS_ATOL,\n rtol=cfg.EXPECTED_RESULTS_RTOL\n )\n task_evaluation.log_copy_paste_friendly_results(all_results)\n\n return all_results\n\n\ndef test_net_on_dataset(\n args,\n dataset_name,\n proposal_file,\n output_dir,\n multi_gpu=False,\n gpu_id=0):\n \"\"\"Run inference on a dataset.\"\"\"\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n test_timer.tic()\n if multi_gpu:\n num_images = len(dataset.get_roidb())\n all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(\n args, dataset_name, proposal_file, num_images, output_dir\n )\n else:\n all_boxes, all_segms, all_keyps = test_net(\n args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id\n )\n test_timer.toc()\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n results = task_evaluation.evaluate_all(\n dataset, all_boxes, all_segms, all_keyps, output_dir\n )\n return results\n\n\ndef multi_gpu_test_net_on_dataset(\n args, dataset_name, proposal_file, num_images, output_dir):\n \"\"\"Multi-gpu inference on a dataset.\"\"\"\n binary_dir = envu.get_runtime_dir()\n binary_ext = envu.get_py_bin_ext()\n binary = os.path.join(binary_dir, args.test_net_file + binary_ext)\n assert os.path.exists(binary), 'Binary \\'{}\\' not found'.format(binary)\n\n # Pass the target dataset and proposal file (if any) via the command line\n opts = ['TEST.DATASETS', '(\"{}\",)'.format(dataset_name)]\n if proposal_file:\n opts += ['TEST.PROPOSAL_FILES', '(\"{}\",)'.format(proposal_file)]\n\n # Run inference in parallel in subprocesses\n # Outputs will be a list of outputs from each subprocess, where the output\n # of each subprocess is the dictionary saved by test_net().\n outputs = subprocess_utils.process_in_parallel(\n 'detection', num_images, binary, output_dir,\n args.load_ckpt, args.load_detectron, opts\n )\n\n # Collate the results from each subprocess\n all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n for det_data in outputs:\n all_boxes_batch = det_data['all_boxes']\n all_segms_batch = det_data['all_segms']\n all_keyps_batch = det_data['all_keyps']\n for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):\n all_boxes[cls_idx] += all_boxes_batch[cls_idx]\n all_segms[cls_idx] += all_segms_batch[cls_idx]\n all_keyps[cls_idx] += all_keyps_batch[cls_idx]\n det_file = os.path.join(output_dir, 'detections.pkl')\n cfg_yaml = yaml.dump(cfg)\n save_object(\n dict(\n all_boxes=all_boxes,\n all_segms=all_segms,\n all_keyps=all_keyps,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n\n return all_boxes, all_segms, all_keyps\n\n\ndef test_net(\n args,\n 
dataset_name,\n proposal_file,\n output_dir,\n model=None,\n ind_range=None,\n gpu_id=0):\n \"\"\"Run inference on all images in a dataset or over an index range of images\n in a dataset using a single GPU.\n \"\"\"\n assert not cfg.MODEL.RPN_ONLY, \\\n 'Use rpn_generate to generate proposals from RPN-only models'\n\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n\n if tblogger is not None and gpu_id == 0:\n for name, param in model.named_parameters():\n if 'mean_weight' in name:\n softmax = nn.Softmax(0)\n weight = softmax(param).cpu().detach().numpy()\n tblogger.add_scalar('mean_weight/'+name+'/in', weight[0], curiter)\n tblogger.add_scalar('mean_weight/'+name+'/ln', weight[1], curiter)\n if len(weight) > 2:\n tblogger.add_scalar('mean_weight/'+name+'/bn', weight[2], curiter)\n elif 'var_weight' in name:\n softmax = nn.Softmax(0)\n weight = softmax(param).cpu().detach().numpy()\n tblogger.add_scalar('var_weight/'+name+'/in', weight[0], curiter)\n tblogger.add_scalar('var_weight/'+name+'/ln', weight[1], curiter)\n if len(weight) > 2:\n tblogger.add_scalar('var_weight/'+name+'/bn', weight[2], curiter)\n\n\n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(\n dataset_name, proposal_file, ind_range\n )\n model.eval()\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)\n timers = defaultdict(Timer)\n for i, entry in enumerate(roidb):\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n # The roidb may contain ground-truth rois (for example, if the roidb\n # comes from the training or val split). We only want to evaluate\n # detection on the *non*-ground-truth rois. We select only the rois\n # that have the gt_classes field set to 0, which means there's no\n # ground truth.\n box_proposals = entry['boxes'][entry['gt_classes'] == 0]\n if len(box_proposals) == 0:\n continue\n else:\n # Faster R-CNN type models generate proposals on-the-fly with an\n # in-network RPN; 1-stage models don't require proposals.\n box_proposals = None\n\n im = cv2.imread(entry['image'])\n cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(model, im, box_proposals, timers)\n\n extend_results(i, all_boxes, cls_boxes_i)\n if cls_segms_i is not None:\n extend_results(i, all_segms, cls_segms_i)\n if cls_keyps_i is not None:\n extend_results(i, all_keyps, cls_keyps_i)\n\n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n det_time = (\n timers['im_detect_bbox'].average_time +\n timers['im_detect_mask'].average_time +\n timers['im_detect_keypoints'].average_time\n )\n misc_time = (\n timers['misc_bbox'].average_time +\n timers['misc_mask'].average_time +\n timers['misc_keypoints'].average_time\n )\n logger.info(\n (\n 'im_detect: range [{:d}, {:d}] of {:d}: '\n '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'\n ).format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1,\n start_ind + num_images, det_time, misc_time, eta\n )\n )\n\n if cfg.VIS:\n im_name = os.path.splitext(os.path.basename(entry['image']))[0]\n vis_utils.vis_one_image(\n im[:, :, ::-1],\n '{:d}_{:s}'.format(i, im_name),\n os.path.join(output_dir, 'vis'),\n cls_boxes_i,\n segms=cls_segms_i,\n keypoints=cls_keyps_i,\n thresh=cfg.VIS_TH,\n box_alpha=0.8,\n dataset=dataset,\n show_class=True\n )\n\n cfg_yaml = yaml.dump(cfg)\n if ind_range is not None:\n det_name = 
'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n det_file = os.path.join(output_dir, det_name)\n save_object(\n dict(\n all_boxes=all_boxes,\n all_segms=all_segms,\n all_keyps=all_keyps,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n return all_boxes, all_segms, all_keyps\n\n\ndef initialize_model_from_cfg(args, gpu_id=0):\n \"\"\"Initialize a model from the global cfg. Loads test-time weights and\n set to evaluation mode.\n \"\"\"\n model = model_builder.Generalized_RCNN()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)\n\n return model\n\n\ndef get_roidb_and_dataset(dataset_name, proposal_file, ind_range):\n \"\"\"Get the roidb for the dataset specified in the global cfg. Optionally\n restrict it to a range of indices if ind_range is a pair of integers.\n \"\"\"\n dataset = JsonDataset(dataset_name)\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert proposal_file, 'No proposal file given'\n roidb = dataset.get_roidb(\n proposal_file=proposal_file,\n proposal_limit=cfg.TEST.PROPOSAL_LIMIT\n )\n else:\n roidb = dataset.get_roidb()\n\n if ind_range is not None:\n total_num_images = len(roidb)\n start, end = ind_range\n roidb = roidb[start:end]\n else:\n start = 0\n end = len(roidb)\n total_num_images = end\n\n return roidb, dataset, start, end, total_num_images\n\n\ndef empty_results(num_classes, num_images):\n \"\"\"Return empty results lists for boxes, masks, and keypoints.\n Box detections are collected into:\n all_boxes[cls][image] = N x 5 array with columns (x1, y1, x2, y2, score)\n Instance mask predictions are collected into:\n all_segms[cls][image] = [...] list of COCO RLE encoded masks that are in\n 1:1 correspondence with the boxes in all_boxes[cls][image]\n Keypoint predictions are collected into:\n all_keyps[cls][image] = [...] list of keypoints results, each encoded as\n a 3D array (#rois, 4, #keypoints) with the 4 rows corresponding to\n [x, y, logit, prob] (See: utils.keypoints.heatmaps_to_keypoints).\n Keypoints are recorded for person (cls = 1); they are in 1:1\n correspondence with the boxes in all_boxes[cls][image].\n \"\"\"\n # Note: do not be tempted to use [[] * N], which gives N references to the\n # *same* empty list.\n all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n return all_boxes, all_segms, all_keyps\n\n\ndef extend_results(index, all_res, im_res):\n \"\"\"Add results for an image to the set of all results at the specified\n index.\n \"\"\"\n # Skip cls_idx 0 (__background__)\n for cls_idx in range(1, len(im_res)):\n all_res[cls_idx][index] = im_res[cls_idx]\n" ]
[ [ "torch.nn.Softmax", "torch.load" ] ]
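Editorial aside (not part of the dataset row above): the empty_results() helper in the preceding code warns against building nested result lists with [[]] * N. A minimal standard-library sketch of why the comprehension form is required:

shared = [[]] * 3
shared[0].append("det")               # every slot points at the same list object
independent = [[] for _ in range(3)]
independent[0].append("det")          # only slot 0 is affected
assert shared == [["det"], ["det"], ["det"]]
assert independent == [["det"], [], []]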
NeoBert/liudengfeng-zipline
[ "dd436fa066a1a9718f676fa161fda32bbbf0f5d9" ]
[ "zipline/examples/pairtrade.py" ]
[ "#!/usr/bin/env python\n#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logbook\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\nfrom datetime import datetime\nimport pytz\n\nfrom zipline.algorithm import TradingAlgorithm\nfrom zipline.transforms import batch_transform\nfrom zipline.utils.factory import load_from_yahoo\n\n\n@batch_transform\ndef ols_transform(data, sid1, sid2):\n \"\"\"Computes regression coefficient (slope and intercept)\n via Ordinary Least Squares between two SIDs.\n \"\"\"\n p0 = data.price[sid1]\n p1 = sm.add_constant(data.price[sid2], prepend=True)\n slope, intercept = sm.OLS(p0, p1).fit().params\n\n return slope, intercept\n\n\nclass Pairtrade(TradingAlgorithm):\n \"\"\"Pairtrading relies on cointegration of two stocks.\n\n The expectation is that once the two stocks drifted apart\n (i.e. there is spread), they will eventually revert again. Thus,\n if we short the upward drifting stock and long the downward\n drifting stock (in short, we buy the spread) once the spread\n widened we can sell the spread with profit once they converged\n again. A nice property of this algorithm is that we enter the\n market in a neutral position.\n\n This specific algorithm tries to exploit the cointegration of\n Pepsi and Coca Cola by estimating the correlation between the\n two. Divergence of the spread is evaluated by z-scoring.\n \"\"\"\n\n def initialize(self, window_length=100):\n self.spreads = []\n self.invested = 0\n self.window_length = window_length\n self.ols_transform = ols_transform(refresh_period=self.window_length,\n window_length=self.window_length)\n self.PEP = self.symbol('PEP')\n self.KO = self.symbol('KO')\n\n def handle_data(self, data):\n ######################################################\n # 1. Compute regression coefficients between PEP and KO\n params = self.ols_transform.handle_data(data, self.PEP, self.KO)\n if params is None:\n return\n intercept, slope = params\n\n ######################################################\n # 2. Compute spread and zscore\n zscore = self.compute_zscore(data, slope, intercept)\n self.record(zscores=zscore)\n\n ######################################################\n # 3. Place orders\n self.place_orders(data, zscore)\n\n def compute_zscore(self, data, slope, intercept):\n \"\"\"1. Compute the spread given slope and intercept.\n 2. 
zscore the spread.\n \"\"\"\n spread = (data[self.PEP].price -\n (slope * data[self.KO].price + intercept))\n self.spreads.append(spread)\n spread_wind = self.spreads[-self.window_length:]\n zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)\n return zscore\n\n def place_orders(self, data, zscore):\n \"\"\"Buy spread if zscore is > 2, sell if zscore < .5.\n \"\"\"\n if zscore >= 2.0 and not self.invested:\n self.order(self.PEP, int(100 / data[self.PEP].price))\n self.order(self.KO, -int(100 / data[self.KO].price))\n self.invested = True\n elif zscore <= -2.0 and not self.invested:\n self.order(self.PEP, -int(100 / data[self.PEP].price))\n self.order(self.KO, int(100 / data[self.KO].price))\n self.invested = True\n elif abs(zscore) < .5 and self.invested:\n self.sell_spread()\n self.invested = False\n\n def sell_spread(self):\n \"\"\"\n decrease exposure, regardless of position long/short.\n buy for a short position, sell for a long.\n \"\"\"\n ko_amount = self.portfolio.positions[self.KO].amount\n self.order(self.KO, -1 * ko_amount)\n pep_amount = self.portfolio.positions[self.PEP].amount\n self.order(self.PEP, -1 * pep_amount)\n\nif __name__ == '__main__':\n logbook.StderrHandler().push_application()\n start = datetime(2000, 1, 1, 0, 0, 0, 0, pytz.utc)\n end = datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)\n data = load_from_yahoo(stocks=['PEP', 'KO'], indexes={},\n start=start, end=end)\n\n pairtrade = Pairtrade()\n results = pairtrade.run(data)\n data['spreads'] = np.nan\n\n ax1 = plt.subplot(211)\n # TODO Bugged - indices are out of bounds\n # data[[pairtrade.PEPsid, pairtrade.KOsid]].plot(ax=ax1)\n plt.ylabel('price')\n plt.setp(ax1.get_xticklabels(), visible=False)\n\n ax2 = plt.subplot(212, sharex=ax1)\n results.zscores.plot(ax=ax2, color='r')\n plt.ylabel('zscored spread')\n\n plt.gcf().set_size_inches(18, 8)\n" ]
[ [ "numpy.mean", "numpy.std", "matplotlib.pyplot.gcf", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot" ] ]
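Editorial aside: the Pairtrade algorithm in the row above records a z-scored spread between PEP and KO. Below is a self-contained sketch of that computation on synthetic, cointegrated-by-construction prices, with the regression coefficients fixed by hand instead of estimated by the batch OLS transform (all numbers are illustrative assumptions):

import numpy as np

rng = np.random.default_rng(0)
ko = 50 + np.cumsum(rng.normal(0, 0.5, 300))   # synthetic KO price path
pep = 1.2 * ko + 5 + rng.normal(0, 1.0, 300)   # PEP tied to KO by construction
slope, intercept = 1.2, 5.0                    # assumed, not estimated via OLS

spreads = pep - (slope * ko + intercept)
window = 100
spread_wind = spreads[-window:]
zscore = (spreads[-1] - spread_wind.mean()) / spread_wind.std()
print(f"latest z-scored spread: {zscore:.2f}")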
HSE-DynGraph-Research-team/DynGraphModelling
[ "890326f4bd7991ef88a7a79cd2c8a77541621423" ]
[ "models/CAW/utils.py" ]
[ "import numpy as np\nimport torch\nimport os\nimport random\n\n\nclass EarlyStopMonitor(object):\n def __init__(self, max_round=3, higher_better=True, tolerance=1e-3):\n self.max_round = max_round\n self.num_round = 0\n\n self.epoch_count = 0\n self.best_epoch = 0\n\n self.last_best = None\n self.higher_better = higher_better\n self.tolerance = tolerance\n\n def early_stop_check(self, curr_val):\n if not self.higher_better:\n curr_val *= -1\n if self.last_best is None:\n self.last_best = curr_val\n elif (curr_val - self.last_best) / np.abs(self.last_best) > self.tolerance:\n self.last_best = curr_val\n self.num_round = 0\n self.best_epoch = self.epoch_count\n else:\n self.num_round += 1\n self.epoch_count += 1\n return self.num_round >= self.max_round\n\n\nclass RandEdgeSampler(object):\n def __init__(self, src_list, dst_list):\n src_list = np.concatenate(src_list)\n dst_list = np.concatenate(dst_list)\n self.src_list = np.unique(src_list)\n self.dst_list = np.unique(dst_list)\n\n def sample(self, size):\n src_index = np.random.randint(0, len(self.src_list), size)\n dst_index = np.random.randint(0, len(self.dst_list), size)\n return self.src_list[src_index], self.dst_list[dst_index]\n\n\ndef set_random_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n\n\ndef process_sampling_numbers(num_neighbors, num_layers):\n num_neighbors = [int(n) for n in num_neighbors]\n if len(num_neighbors) == 1:\n num_neighbors = num_neighbors * num_layers\n else:\n num_layers = len(num_neighbors)\n return num_neighbors, num_layers\n" ]
[ [ "numpy.concatenate", "torch.cuda.manual_seed_all", "numpy.random.seed", "torch.manual_seed", "numpy.abs", "numpy.unique" ] ]
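Editorial aside: a short sketch of how the EarlyStopMonitor defined in the row above might be driven from a validation loop. The class is assumed to be in scope (e.g. imported from models/CAW/utils.py), and the validation scores are made up for illustration:

monitor = EarlyStopMonitor(max_round=3, higher_better=True, tolerance=1e-3)
val_auc_per_epoch = [0.80, 0.85, 0.86, 0.859, 0.858, 0.857, 0.856]   # hypothetical scores
for epoch, auc in enumerate(val_auc_per_epoch):
    if monitor.early_stop_check(auc):
        print(f"early stop at epoch {epoch}; best epoch was {monitor.best_epoch}")
        break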
xopclabs/random-rotation-sklearn
[ "41f624066cfb1830bf067f77da9d284c6e46f1a1" ]
[ "rrsklearn/boosting.py" ]
[ "import numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor\nfrom sklearn.ensemble._gb import BaseGradientBoosting\nfrom .tree import RRDecisionTreeRegressor\n\n\nclass RRBaseGradientBoosting(BaseGradientBoosting):\n \"\"\"Abstract base class for Random Rotation Gradient Boosting.\"\"\"\n\n def _fit_stage(self, i, X, y, raw_predictions, sample_weight, sample_mask,\n random_state, X_csc=None, X_csr=None):\n \"\"\"Fit another stage of ``_n_classes`` trees to the boosting model.\"\"\"\n\n assert sample_mask.dtype == bool\n loss = self.loss_\n original_y = y\n\n # Need to pass a copy of raw_predictions to negative_gradient()\n # because raw_predictions is partially updated at the end of the loop\n # in update_terminal_regions(), and gradients need to be evaluated at\n # iteration i - 1.\n raw_predictions_copy = raw_predictions.copy()\n\n for k in range(loss.K):\n if loss.is_multi_class:\n y = np.array(original_y == k, dtype=np.float64)\n\n residual = loss.negative_gradient(y, raw_predictions_copy, k=k,\n sample_weight=sample_weight)\n\n rs = random_state.randint(0, 2**32 - 1)\n # induce regression tree on residuals\n tree = RRDecisionTreeRegressor(\n criterion=self.criterion,\n splitter='best',\n max_depth=self.max_depth,\n min_samples_split=self.min_samples_split,\n min_samples_leaf=self.min_samples_leaf,\n min_weight_fraction_leaf=self.min_weight_fraction_leaf,\n min_impurity_decrease=self.min_impurity_decrease,\n min_impurity_split=self.min_impurity_split,\n max_features=self.max_features,\n max_leaf_nodes=self.max_leaf_nodes,\n random_state=rs,\n ccp_alpha=self.ccp_alpha)\n\n if self.subsample < 1.0:\n # no inplace multiplication!\n sample_weight = sample_weight * sample_mask.astype(np.float64)\n\n X = X_csr if X_csr is not None else X\n tree.fit(X, residual, sample_weight=sample_weight,\n check_input=False)\n\n # update tree leaves\n loss.update_terminal_regions(\n tree.tree_, X, y, residual, raw_predictions, sample_weight,\n sample_mask, learning_rate=self.learning_rate, k=k)\n\n # add tree to ensemble\n self.estimators_[i, k] = tree\n\n return raw_predictions\n\n\nclass RRGradientBoostingClassifier(RRBaseGradientBoosting, GradientBoostingClassifier):\n '''Mixes Random Rotation BaseGradientBoosting with GradientBoostingClassifier'''\n pass\n\n\nclass RRGradientBoostingRegressor(RRBaseGradientBoosting, GradientBoostingRegressor):\n '''Mixes Random Rotation BaseGradientBoosting with GradientBoostingRegressor'''\n pass\n" ]
[ [ "numpy.array" ] ]
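Editorial aside: the RRGradientBoosting classes above swap in RRDecisionTreeRegressor, which fits each tree in a randomly rotated feature space. Here is a stand-in sketch of that general idea using plain scikit-learn and a QR-based random orthogonal rotation; it is not the package's actual implementation:

import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(42)
X = rng.normal(size=(200, 5))
y = X @ rng.normal(size=5) + 0.1 * rng.normal(size=200)

Q, _ = np.linalg.qr(rng.normal(size=(5, 5)))   # random orthogonal rotation matrix
tree = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X @ Q, y)
print(tree.score(X @ Q, y))                    # fit quality in the rotated space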
DimitryRakhlei/BTECH
[ "fefe469bd7d1f4adbc70bdc57670e793ad4c31f6" ]
[ "c8005/a1/src/avg.py" ]
[ "import glob\nimport numpy as np\n#import matplotlib.pyplot as plt\n\nmt_files = glob.glob(\"../logs/mt_*.log\")\nmp_files = glob.glob(\"../logs/mp_*.log\")\n\nprint(mt_files)\nprint(mp_files)\n\nvalues = {}\nfor fn in mt_files:\n with open(fn, \"r\") as file:\n values[fn] = np.array([float(x.rstrip()) for x in file.readlines()])\n\n\nfor fn in mp_files:\n with open(fn, \"r\") as file:\n values[fn] = np.array([float(x.rstrip()) for x in file.readlines()])\n\n\nprint(values)\n\n\naverages = {}\nfor fn in mt_files:\n averages[fn] = np.mean(values[fn])\n\nfor fn in mp_files:\n averages[fn] = np.mean(values[fn])\n\n\n\nprint(averages)\n\n#plt.plot([44444, averages[mt_files[0]]], [9999999, averages[mt_files[1]]], 'k', lw=2)\n#plt.plot([44444, averages[mp_files[0]]], [9999999, averages[mp_files[1]]], lw=2)\n#plt.xlim(1000, 0.001)\n#plt.show()\n" ]
[ [ "numpy.mean" ] ]
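Editorial aside: the averaging script above repeats the same read-and-average loop for both log groups. A sketch of the same per-file averaging written as a single pass (paths here are hypothetical placeholders):

import numpy as np

def average_log(path):
    with open(path) as fh:
        return float(np.mean([float(line) for line in fh if line.strip()]))

log_files = ["../logs/mt_2.log", "../logs/mp_2.log"]   # hypothetical paths
averages = {fn: average_log(fn) for fn in log_files}
print(averages)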
tk1012/ion-kit
[ "d42be09dfd78fe415058723c186a76a84c699d45" ]
[ "python/tests/test_all.py" ]
[ "# https://github.com/fixstars/ion-csharp/blob/master/test/Test.cs\nfrom ionpy import Node, Builder, Buffer, PortMap, Port, Param, Type, TypeCode\nimport numpy as np # TODO: rewrite with pure python\n\n\ndef test_all():\n t = Type(code_=TypeCode.Int, bits_=32, lanes_=1)\n input_port = Port(key='input', type=t, dim=2)\n value41 = Param(key='v', val='41')\n\n builder = Builder()\n builder.set_target(target='host')\n builder.with_bb_module(path='libion-bb-test.so')\n # builder.with_bb_module(path='ion-bb-test.dll') # for Windows\n\n node = builder.add('test_inc_i32x2').set_port(ports=[ input_port, ]).set_param(params=[ value41, ])\n\n port_map = PortMap()\n\n sizes = (4, 4)\n ibuf = Buffer(type=t, sizes=sizes)\n obuf = Buffer(type=t, sizes=sizes)\n\n idata = np.full((4*4, ), fill_value=1, dtype=np.int32)\n odata = np.full((4*4, ), fill_value=0, dtype=np.int32)\n\n idata_bytes = idata.tobytes(order='C')\n odata_bytes = odata.tobytes(order='C')\n\n ibuf.write(data=idata_bytes)\n obuf.write(data=odata_bytes)\n\n port_map.set_buffer(port=input_port, buffer=ibuf)\n port_map.set_buffer(port=node.get_port(key='output'), buffer=obuf)\n\n builder.run(port_map=port_map)\n\n obuf_bytes = obuf.read(num_data_bytes=len(odata_bytes))\n odata = np.frombuffer(obuf_bytes, dtype=np.int32)\n\n for i in range(4*4):\n assert odata[i] == 42\n" ]
[ [ "numpy.full", "numpy.frombuffer" ] ]
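Editorial aside: the ion-kit test above round-trips buffers through raw bytes. A standalone sketch of the numpy serialization it relies on, with no ionpy dependency:

import numpy as np

idata = np.full((4 * 4,), fill_value=41, dtype=np.int32)
raw = idata.tobytes(order="C")                 # what Buffer.write() receives
restored = np.frombuffer(raw, dtype=np.int32)  # what Buffer.read() hands back
assert (restored + 1 == 42).all()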
CadQuery/PostMesh
[ "d68f44707166d6556042ed79b336c996d8ae52c5" ]
[ "setup.py" ]
[ "from setuptools import setup\nfrom setuptools import find_packages\nfrom distutils.command.clean import clean\nfrom distutils.extension import Extension\nfrom distutils.sysconfig import get_config_vars\nfrom Cython.Build import cythonize\nimport os, platform, sys, fnmatch\nimport numpy\n\n\ndef setup_package():\n\n # Get Platform/OS\n _os = sys.platform\n\n # Get the current directory\n _pwd_ = os.path.dirname(os.path.realpath('__file__'))\n _upwd_ = os.path.dirname(_pwd_)\n\n # Remove the \"-Wstrict-prototypes\" compiler option, which isn't valid for C++.\n cfg_vars = get_config_vars()\n for key, value in cfg_vars.items():\n if isinstance(value,str):\n cfg_vars[key] = value.replace(\"-Wstrict-prototypes\", \"\")\n\n # Suppress numpy deprecation warnings\n no_deprecated = (\"NPY_NO_DEPRECATED_API\",None)\n\n sourcefiles = [\n os.path.join(_pwd_,\"bindings\",\"PostMeshPy.pyx\"),\n os.path.join(_pwd_,\"src\",\"PostMeshBase.cpp\"),\n os.path.join(_pwd_,\"src\",\"PostMeshCurve.cpp\"),\n os.path.join(_pwd_,\"src\",\"PostMeshSurface.cpp\")\n ]\n\n\n # Set the compiler\n # Must be called as: \"python setup.py build_ext CXX=/usr/bin/g++\"\n args = sys.argv\n _cxx_specified = False\n if len(args) > 1:\n for counter, arg in enumerate(args):\n if \"CXX\" in arg:\n _cxx_specified = True\n _cxx_compiler = arg.split(\"=\")[-1]\n args.remove(arg)\n if _cxx_specified:\n os.environ[\"CC\"] = _cxx_compiler\n os.environ[\"CXX\"] = _cxx_compiler\n else:\n _cxx_compiler = get_config_vars()['CXX'].split(' ')[0]\n os.environ[\"CC\"] = _cxx_compiler\n os.environ[\"CXX\"] = _cxx_compiler\n\n\n # Compiler arguments\n if \"clang++\" in _cxx_compiler or (\"c++\" in _cxx_compiler and \"darwin\" in _os):\n compiler_args = [\"-O3\",\"-std=c++11\",\"-m64\",\"-march=native\",\"-mtune=native\",\"-ffp-contract=fast\",\n \"-ffast-math\",\"-flto\",\"-DNPY_NO_DEPRECATED_API\",\"-Wno-shorten-64-to-32\"]\n else:\n compiler_args = [\"-O3\",\"-std=c++11\",\"-m64\",\"-march=native\",\"-mtune=native\",\"-ffp-contract=fast\",\n \"-mfpmath=sse\",\"-ffast-math\",\"-ftree-vectorize\",\"-finline-functions\",\"-finline-limit=100000\",\n \"-funroll-loops\",\"-Wno-unused-function\",\"-flto\",\"-DNPY_NO_DEPRECATED_API\",\"-Wno-cpp\"]\n\n # if \"darwin\" in _os:\n # compiler_args.append(\"-stdlib=libstdc++\")\n\n\n eigen_include_path = \"/usr/local/include/eigen/\"\n oce_include_path = \"/usr/local/include/oce/\"\n\n\n # Link to OpenCascade runtime libraries\n # Search for all subdirectories under /usr/local/lib\n # Change the directory name if occ is elsewhere\n occ_dir = \"/usr/local/lib\"\n all_dir_libs = os.listdir(occ_dir)\n occ_libs = []\n for i in all_dir_libs:\n lib_suffix = i.split(\".\")[-1]\n if i[:4]==\"libT\" and (lib_suffix != \"a\" and lib_suffix != \"la\" and lib_suffix != \"0\"):\n if \"darwin\" in _os:\n occ_libs.append(i[3:-6])\n elif \"linux\" in _os:\n occ_libs.append(\":\"+i)\n\n found_oce = False\n for i in occ_libs:\n if \"TKernel\" in i:\n found_oce = True\n break\n\n\n if found_oce is False:\n if \"darwin\" in _os:\n version = next(os.walk(\"/usr/local/Cellar/oce/\"))[1][0]\n occ_dir = os.path.join(\"/usr/local/Cellar/oce\",version,\"lib\")\n oce_include_path = os.path.join(\"/usr/local/Cellar/oce\",version,\"include\",\"oce\")\n elif \"linux\" in _os:\n occ_dir = \"/usr/lib/x86_64-linux-gnu\"\n oce_include_path = \"/usr/include/oce/\"\n\n all_dir_libs = os.listdir(occ_dir)\n for i in all_dir_libs:\n lib_suffix = i.split(\".\")[-1]\n if i[:4]==\"libT\" and (lib_suffix != \"a\" and lib_suffix != \"la\" and 
lib_suffix != \"0\"):\n occ_libs.append(\":\"+i)\n\n\n # Create extension module\n extensions = [\n Extension(\n name = \"PostMeshPy\",\n sources = sourcefiles,\n language=\"c++\",\n include_dirs = [_pwd_,\n _pwd_+\"/include/\",\n eigen_include_path,\n oce_include_path,\n numpy.get_include()],\n libraries= [\"stdc++\"] + occ_libs,\n library_dirs = [_pwd_, os.path.join(\"/usr\",\"local\",\"lib\")],\n extra_compile_args = compiler_args,\n define_macros=[no_deprecated],\n ),\n ]\n\n with open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n setup(\n ext_modules = cythonize(extensions),\n name = \"PostMeshPy\",\n version = \"1.6.1\",\n description = \"A Python wrapper for PostMesh - a high order curvilinear mesh generator based on OpenCascade\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Roman Poya\",\n author_email = \"[email protected]\",\n url = \"https://github.com/romeric/PostMesh\",\n license=\"MIT\",\n install_requires=[\n 'numpy>=1.9',\n 'cython>=0.23'],\n packages=find_packages(),\n include_package_data=True,\n package_data={'': ['bindings/*','src/*','include/*','example/*',\n '*.pyx', '*.pxd', '*.h', '*.hpp', '*.c', '*.cpp', 'Makefile']},\n extra_files = \"LICENSE.md\"\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n" ]
[ [ "numpy.get_include" ] ]
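Editorial aside: a minimal skeleton of the cythonize/Extension pattern used in the setup.py above; the project and file names are hypothetical placeholders, and the compiler flags are reduced to the essentials:

from setuptools import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy

extensions = [
    Extension(
        name="ExamplePy",                                        # hypothetical module name
        sources=["bindings/ExamplePy.pyx", "src/Example.cpp"],   # hypothetical sources
        language="c++",
        include_dirs=[numpy.get_include()],
        extra_compile_args=["-O3", "-std=c++11"],
    )
]

setup(name="ExamplePy", ext_modules=cythonize(extensions))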
itsAbdulKhadar/Machine-Learning-with-Streamlit
[ "c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3", "c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3", "c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3", "c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3" ]
[ "venv/Lib/site-packages/pandas/tests/series/methods/test_replace.py", "venv/Lib/site-packages/streamlit/bootstrap.py", "venv/Lib/site-packages/pandas/tests/scalar/period/test_period.py", "venv/Lib/site-packages/pandas/io/excel/_openpyxl.py" ]
[ "import re\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\nclass TestSeriesReplace:\n def test_replace(self, datetime_series):\n N = 100\n ser = pd.Series(np.random.randn(N))\n ser[0:4] = np.nan\n ser[6:10] = 0\n\n # replace list with a single value\n return_value = ser.replace([np.nan], -1, inplace=True)\n assert return_value is None\n\n exp = ser.fillna(-1)\n tm.assert_series_equal(ser, exp)\n\n rs = ser.replace(0.0, np.nan)\n ser[ser == 0.0] = np.nan\n tm.assert_series_equal(rs, ser)\n\n ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)\n ser[:5] = np.nan\n ser[6:10] = \"foo\"\n ser[20:30] = \"bar\"\n\n # replace list with a single value\n rs = ser.replace([np.nan, \"foo\", \"bar\"], -1)\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -1).all()\n assert (rs[20:30] == -1).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values\n rs = ser.replace({np.nan: -1, \"foo\": -2, \"bar\": -3})\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -2).all()\n assert (rs[20:30] == -3).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, \"foo\", \"bar\"], [-1, -2, -3])\n tm.assert_series_equal(rs, rs2)\n\n # replace inplace\n return_value = ser.replace([np.nan, \"foo\", \"bar\"], -1, inplace=True)\n assert return_value is None\n\n assert (ser[:5] == -1).all()\n assert (ser[6:10] == -1).all()\n assert (ser[20:30] == -1).all()\n\n ser = pd.Series([np.nan, 0, np.inf])\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n ser = pd.Series([np.nan, 0, \"foo\", \"bar\", np.inf, None, pd.NaT])\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n filled = ser.copy()\n filled[4] = 0\n tm.assert_series_equal(ser.replace(np.inf, 0), filled)\n\n ser = pd.Series(datetime_series.index)\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n # malformed\n msg = r\"Replacement lists must match in length\\. 
Expecting 3 got 2\"\n with pytest.raises(ValueError, match=msg):\n ser.replace([1, 2, 3], [np.nan, 0])\n\n # make sure that we aren't just masking a TypeError because bools don't\n # implement indexing\n with pytest.raises(TypeError, match=\"Cannot compare types .+\"):\n ser.replace([1, 2], [np.nan, 0])\n\n ser = pd.Series([0, 1, 2, 3, 4])\n result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])\n tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))\n\n def test_replace_gh5319(self):\n # API change from 0.12?\n # GH 5319\n ser = pd.Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace([np.nan])\n tm.assert_series_equal(result, expected)\n\n ser = pd.Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace(np.nan)\n tm.assert_series_equal(result, expected)\n # GH 5797\n ser = pd.Series(pd.date_range(\"20130101\", periods=5))\n expected = ser.copy()\n expected.loc[2] = pd.Timestamp(\"20120101\")\n result = ser.replace({pd.Timestamp(\"20130103\"): pd.Timestamp(\"20120101\")})\n tm.assert_series_equal(result, expected)\n result = ser.replace(pd.Timestamp(\"20130103\"), pd.Timestamp(\"20120101\"))\n tm.assert_series_equal(result, expected)\n\n # GH 11792: Test with replacing NaT in a list with tz data\n ts = pd.Timestamp(\"2015/01/01\", tz=\"UTC\")\n s = pd.Series([pd.NaT, pd.Timestamp(\"2015/01/01\", tz=\"UTC\")])\n result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)\n expected = pd.Series([pd.Timestamp.min, ts], dtype=object)\n tm.assert_series_equal(expected, result)\n\n def test_replace_timedelta_td64(self):\n tdi = pd.timedelta_range(0, periods=5)\n ser = pd.Series(tdi)\n\n # Using a single dict argument means we go through replace_list\n result = ser.replace({ser[1]: ser[3]})\n\n expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])\n tm.assert_series_equal(result, expected)\n\n def test_replace_with_single_list(self):\n ser = pd.Series([0, 1, 2, 3, 4])\n result = ser.replace([1, 2, 3])\n tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))\n\n s = ser.copy()\n return_value = s.replace([1, 2, 3], inplace=True)\n assert return_value is None\n tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))\n\n # make sure things don't get corrupted when fillna call fails\n s = ser.copy()\n msg = (\n r\"Invalid fill method\\. Expecting pad \\(ffill\\) or backfill \"\n r\"\\(bfill\\)\\. 
Got crash_cymbal\"\n )\n with pytest.raises(ValueError, match=msg):\n return_value = s.replace([1, 2, 3], inplace=True, method=\"crash_cymbal\")\n assert return_value is None\n tm.assert_series_equal(s, ser)\n\n def test_replace_with_empty_list(self):\n # GH 21977\n s = pd.Series([[1], [2, 3], [], np.nan, [4]])\n expected = s\n result = s.replace([], np.nan)\n tm.assert_series_equal(result, expected)\n\n # GH 19266\n with pytest.raises(ValueError, match=\"cannot assign mismatch\"):\n s.replace({np.nan: []})\n with pytest.raises(ValueError, match=\"cannot assign mismatch\"):\n s.replace({np.nan: [\"dummy\", \"alt\"]})\n\n def test_replace_mixed_types(self):\n s = pd.Series(np.arange(5), dtype=\"int64\")\n\n def check_replace(to_rep, val, expected):\n sc = s.copy()\n r = s.replace(to_rep, val)\n return_value = sc.replace(to_rep, val, inplace=True)\n assert return_value is None\n tm.assert_series_equal(expected, r)\n tm.assert_series_equal(expected, sc)\n\n # MUST upcast to float\n e = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0])\n tr, v = [3], [3.0]\n check_replace(tr, v, e)\n\n # MUST upcast to float\n e = pd.Series([0, 1, 2, 3.5, 4])\n tr, v = [3], [3.5]\n check_replace(tr, v, e)\n\n # casts to object\n e = pd.Series([0, 1, 2, 3.5, \"a\"])\n tr, v = [3, 4], [3.5, \"a\"]\n check_replace(tr, v, e)\n\n # again casts to object\n e = pd.Series([0, 1, 2, 3.5, pd.Timestamp(\"20130101\")])\n tr, v = [3, 4], [3.5, pd.Timestamp(\"20130101\")]\n check_replace(tr, v, e)\n\n # casts to object\n e = pd.Series([0, 1, 2, 3.5, True], dtype=\"object\")\n tr, v = [3, 4], [3.5, True]\n check_replace(tr, v, e)\n\n # test an object with dates + floats + integers + strings\n dr = pd.Series(pd.date_range(\"1/1/2001\", \"1/10/2001\", freq=\"D\"))\n result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, \"a\"])\n expected = pd.Series([1.0, 2, \"a\"] + dr[3:].tolist(), dtype=object)\n tm.assert_series_equal(result, expected)\n\n def test_replace_bool_with_string_no_op(self):\n s = pd.Series([True, False, True])\n result = s.replace(\"fun\", \"in-the-sun\")\n tm.assert_series_equal(s, result)\n\n def test_replace_bool_with_string(self):\n # nonexistent elements\n s = pd.Series([True, False, True])\n result = s.replace(True, \"2u\")\n expected = pd.Series([\"2u\", False, \"2u\"])\n tm.assert_series_equal(expected, result)\n\n def test_replace_bool_with_bool(self):\n s = pd.Series([True, False, True])\n result = s.replace(True, False)\n expected = pd.Series([False] * len(s))\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_dict_with_bool_keys(self):\n s = pd.Series([True, False, True])\n with pytest.raises(TypeError, match=\"Cannot compare types .+\"):\n s.replace({\"asdf\": \"asdb\", True: \"yes\"})\n\n def test_replace2(self):\n N = 100\n ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)\n ser[:5] = np.nan\n ser[6:10] = \"foo\"\n ser[20:30] = \"bar\"\n\n # replace list with a single value\n rs = ser.replace([np.nan, \"foo\", \"bar\"], -1)\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -1).all()\n assert (rs[20:30] == -1).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values\n rs = ser.replace({np.nan: -1, \"foo\": -2, \"bar\": -3})\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -2).all()\n assert (rs[20:30] == -3).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, \"foo\", \"bar\"], [-1, -2, -3])\n tm.assert_series_equal(rs, rs2)\n\n # replace inplace\n 
return_value = ser.replace([np.nan, \"foo\", \"bar\"], -1, inplace=True)\n assert return_value is None\n assert (ser[:5] == -1).all()\n assert (ser[6:10] == -1).all()\n assert (ser[20:30] == -1).all()\n\n def test_replace_with_dictlike_and_string_dtype(self):\n # GH 32621\n s = pd.Series([\"one\", \"two\", np.nan], dtype=\"string\")\n expected = pd.Series([\"1\", \"2\", np.nan])\n result = s.replace({\"one\": \"1\", \"two\": \"2\"})\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_empty_dictlike(self):\n # GH 15289\n s = pd.Series(list(\"abcd\"))\n tm.assert_series_equal(s, s.replace(dict()))\n\n with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):\n empty_series = pd.Series([])\n tm.assert_series_equal(s, s.replace(empty_series))\n\n def test_replace_string_with_number(self):\n # GH 15743\n s = pd.Series([1, 2, 3])\n result = s.replace(\"2\", np.nan)\n expected = pd.Series([1, 2, 3])\n tm.assert_series_equal(expected, result)\n\n def test_replace_replacer_equals_replacement(self):\n # GH 20656\n # make sure all replacers are matching against original values\n s = pd.Series([\"a\", \"b\"])\n expected = pd.Series([\"b\", \"a\"])\n result = s.replace({\"a\": \"b\", \"b\": \"a\"})\n tm.assert_series_equal(expected, result)\n\n def test_replace_unicode_with_number(self):\n # GH 15743\n s = pd.Series([1, 2, 3])\n result = s.replace(\"2\", np.nan)\n expected = pd.Series([1, 2, 3])\n tm.assert_series_equal(expected, result)\n\n def test_replace_mixed_types_with_string(self):\n # Testing mixed\n s = pd.Series([1, 2, 3, \"4\", 4, 5])\n result = s.replace([2, \"4\"], np.nan)\n expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])\n tm.assert_series_equal(expected, result)\n\n @pytest.mark.parametrize(\n \"categorical, numeric\",\n [\n (pd.Categorical(\"A\", categories=[\"A\", \"B\"]), [1]),\n (pd.Categorical((\"A\",), categories=[\"A\", \"B\"]), [1]),\n (pd.Categorical((\"A\", \"B\"), categories=[\"A\", \"B\"]), [1, 2]),\n ],\n )\n def test_replace_categorical(self, categorical, numeric):\n # GH 24971\n # Do not check if dtypes are equal due to a known issue that\n # Categorical.replace sometimes coerces to object (GH 23305)\n s = pd.Series(categorical)\n result = s.replace({\"A\": 1, \"B\": 2})\n expected = pd.Series(numeric)\n tm.assert_series_equal(expected, result)\n\n def test_replace_categorical_single(self):\n # GH 26988\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=\"US/Pacific\")\n s = pd.Series(dti)\n c = s.astype(\"category\")\n\n expected = c.copy()\n expected = expected.cat.add_categories(\"foo\")\n expected[2] = \"foo\"\n expected = expected.cat.remove_unused_categories()\n assert c[2] != \"foo\"\n\n result = c.replace(c[2], \"foo\")\n tm.assert_series_equal(expected, result)\n assert c[2] != \"foo\" # ensure non-inplace call does not alter original\n\n return_value = c.replace(c[2], \"foo\", inplace=True)\n assert return_value is None\n tm.assert_series_equal(expected, c)\n\n first_value = c[0]\n return_value = c.replace(c[1], c[0], inplace=True)\n assert return_value is None\n assert c[0] == c[1] == first_value # test replacing with existing value\n\n def test_replace_with_no_overflowerror(self):\n # GH 25616\n # casts to object without Exception from OverflowError\n s = pd.Series([0, 1, 2, 3, 4])\n result = s.replace([3], [\"100000000000000000000\"])\n expected = pd.Series([0, 1, 2, \"100000000000000000000\", 4])\n tm.assert_series_equal(result, expected)\n\n s = pd.Series([0, \"100000000000000000000\", \"100000000000000000001\"])\n 
result = s.replace([\"100000000000000000000\"], [1])\n expected = pd.Series([0, 1, \"100000000000000000001\"])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"ser, to_replace, exp\",\n [\n ([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),\n ([\"1\", \"2\", \"3\"], {\"1\": \"2\", \"2\": \"3\", \"3\": \"4\"}, [\"2\", \"3\", \"4\"]),\n ],\n )\n def test_replace_commutative(self, ser, to_replace, exp):\n # GH 16051\n # DataFrame.replace() overwrites when values are non-numeric\n\n series = pd.Series(ser)\n\n expected = pd.Series(exp)\n result = series.replace(to_replace)\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"ser, exp\", [([1, 2, 3], [1, True, 3]), ([\"x\", 2, 3], [\"x\", True, 3])]\n )\n def test_replace_no_cast(self, ser, exp):\n # GH 9113\n # BUG: replace int64 dtype with bool coerces to int64\n\n series = pd.Series(ser)\n result = series.replace(2, True)\n expected = pd.Series(exp)\n\n tm.assert_series_equal(result, expected)\n\n def test_replace_invalid_to_replace(self):\n # GH 18634\n # API: replace() should raise an exception if invalid argument is given\n series = pd.Series([\"a\", \"b\", \"c \"])\n msg = (\n r\"Expecting 'to_replace' to be either a scalar, array-like, \"\n r\"dict or None, got invalid type.*\"\n )\n with pytest.raises(TypeError, match=msg):\n series.replace(lambda x: x.strip())\n\n def test_replace_only_one_dictlike_arg(self):\n # GH#33340\n\n ser = pd.Series([1, 2, \"A\", pd.Timestamp.now(), True])\n to_replace = {0: 1, 2: \"A\"}\n value = \"foo\"\n msg = \"Series.replace cannot use dict-like to_replace and non-None value\"\n with pytest.raises(ValueError, match=msg):\n ser.replace(to_replace, value)\n\n to_replace = 1\n value = {0: \"foo\", 2: \"bar\"}\n msg = \"Series.replace cannot use dict-value and non-None to_replace\"\n with pytest.raises(ValueError, match=msg):\n ser.replace(to_replace, value)\n\n def test_replace_extension_other(self):\n # https://github.com/pandas-dev/pandas/issues/34530\n ser = pd.Series(pd.array([1, 2, 3], dtype=\"Int64\"))\n ser.replace(\"\", \"\") # no exception\n\n def test_replace_with_compiled_regex(self):\n # https://github.com/pandas-dev/pandas/issues/35680\n s = pd.Series([\"a\", \"b\", \"c\"])\n regex = re.compile(\"^a$\")\n result = s.replace({regex: \"z\"}, regex=True)\n expected = pd.Series([\"z\", \"b\", \"c\"])\n tm.assert_series_equal(result, expected)\n", "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport signal\nimport sys\nfrom typing import Any, Dict\n\nimport click\nimport tornado.ioloop\nfrom streamlit.git_util import GitRepo, MIN_GIT_VERSION\n\nfrom streamlit import version\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import url_util\nfrom streamlit import env_util\nfrom streamlit import secrets\nfrom streamlit import util\nfrom streamlit.config import CONFIG_FILENAMES\nfrom streamlit.logger import get_logger\nfrom streamlit.report import Report\nfrom 
streamlit.secrets import SECRETS_FILE_LOC\nfrom streamlit.server.server import Server, server_address_is_unix_socket\nfrom streamlit.watcher.file_watcher import watch_file\nfrom streamlit.watcher.file_watcher import report_watchdog_availability\n\nLOGGER = get_logger(__name__)\n\n# Wait for 1 second before opening a browser. This gives old tabs a chance to\n# reconnect.\n# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\nBROWSER_WAIT_TIMEOUT_SEC = 1\n\nNEW_VERSION_TEXT = \"\"\"\n %(new_version)s\n\n See what's new at https://discuss.streamlit.io/c/announcements\n\n Enter the following command to upgrade:\n %(prompt)s %(command)s\n\"\"\" % {\n \"new_version\": click.style(\n \"A new version of Streamlit is available.\", fg=\"blue\", bold=True\n ),\n \"prompt\": click.style(\"$\", fg=\"blue\"),\n \"command\": click.style(\"pip install streamlit --upgrade\", bold=True),\n}\n\n\ndef _set_up_signal_handler():\n LOGGER.debug(\"Setting up signal handler\")\n\n def signal_handler(signal_number, stack_frame):\n # The server will shut down its threads and stop the ioloop\n Server.get_current().stop()\n\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n if sys.platform == \"win32\":\n signal.signal(signal.SIGBREAK, signal_handler)\n else:\n signal.signal(signal.SIGQUIT, signal_handler)\n\n\ndef _fix_sys_path(script_path):\n \"\"\"Add the script's folder to the sys path.\n\n Python normally does this automatically, but since we exec the script\n ourselves we need to do it instead.\n \"\"\"\n sys.path.insert(0, os.path.dirname(script_path))\n\n\ndef _fix_matplotlib_crash():\n \"\"\"Set Matplotlib backend to avoid a crash.\n\n The default Matplotlib backend crashes Python on OSX when run on a thread\n that's not the main thread, so here we set a safer backend as a fix.\n Users can always disable this behavior by setting the config\n runner.fixMatplotlib = false.\n\n This fix is OS-independent. We didn't see a good reason to make this\n Mac-only. Consistency within Streamlit seemed more important.\n \"\"\"\n if config.get_option(\"runner.fixMatplotlib\"):\n try:\n # TODO: a better option may be to set\n # os.environ[\"MPLBACKEND\"] = \"Agg\". We'd need to do this towards\n # the top of __init__.py, before importing anything that imports\n # pandas (which imports matplotlib). Alternately, we could set\n # this environment variable in a new entrypoint defined in\n # setup.py. Both of these introduce additional trickiness: they\n # need to run without consulting streamlit.config.get_option,\n # because this would import streamlit, and therefore matplotlib.\n import matplotlib\n\n matplotlib.use(\"Agg\")\n except ImportError:\n pass\n\n\ndef _fix_tornado_crash():\n \"\"\"Set default asyncio policy to be compatible with Tornado 6.\n\n Tornado 6 (at least) is not compatible with the default\n asyncio implementation on Windows. 
So here we\n pick the older SelectorEventLoopPolicy when the OS is Windows\n if the known-incompatible default policy is in use.\n\n This has to happen as early as possible to make it a low priority and\n overrideable\n\n See: https://github.com/tornadoweb/tornado/issues/2608\n\n FIXME: if/when tornado supports the defaults in asyncio,\n remove and bump tornado requirement for py38\n \"\"\"\n if env_util.IS_WINDOWS and sys.version_info >= (3, 8):\n import asyncio\n\n try:\n from asyncio import ( # type: ignore[attr-defined]\n WindowsProactorEventLoopPolicy,\n WindowsSelectorEventLoopPolicy,\n )\n except ImportError:\n pass\n # Not affected\n else:\n if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:\n # WindowsProactorEventLoopPolicy is not compatible with\n # Tornado 6 fallback to the pre-3.8 default of Selector\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n \"\"\"\n import sys\n\n sys.argv = [script_path] + list(args)\n\n\ndef _on_server_start(server):\n _maybe_print_old_git_warning(server.script_path)\n _print_url(server.is_running_hello)\n report_watchdog_availability()\n _print_new_version_message()\n\n # Load secrets.toml if it exists. If the file doesn't exist, this\n # function will return without raising an exception. We catch any parse\n # errors and display them here.\n try:\n secrets.load_if_toml_exists()\n except BaseException as e:\n LOGGER.error(f\"Failed to load {SECRETS_FILE_LOC}\", exc_info=e)\n\n def maybe_open_browser():\n if config.get_option(\"server.headless\"):\n # Don't open browser when in headless mode.\n return\n\n if server.browser_is_connected:\n # Don't auto-open browser if there's already a browser connected.\n # This can happen if there's an old tab repeatedly trying to\n # connect, and it happens to success before we launch the browser.\n return\n\n if config.is_manually_set(\"browser.serverAddress\"):\n addr = config.get_option(\"browser.serverAddress\")\n elif config.is_manually_set(\"server.address\"):\n if server_address_is_unix_socket():\n # Don't open browser when server address is an unix socket\n return\n addr = config.get_option(\"server.address\")\n else:\n addr = \"localhost\"\n\n util.open_browser(Report.get_url(addr))\n\n # Schedule the browser to open using the IO Loop on the main thread, but\n # only if no other browser connects within 1s.\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n\n\ndef _fix_pydeck_mapbox_api_warning():\n \"\"\"Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception\"\"\"\n\n os.environ[\"MAPBOX_API_KEY\"] = config.get_option(\"mapbox.token\")\n\n\ndef _print_new_version_message():\n if version.should_show_new_version_notice():\n click.secho(NEW_VERSION_TEXT)\n\n\ndef _print_url(is_running_hello):\n if is_running_hello:\n title_message = \"Welcome to Streamlit. 
Check out our demo in your browser.\"\n else:\n title_message = \"You can now view your Streamlit app in your browser.\"\n\n named_urls = []\n\n if config.is_manually_set(\"browser.serverAddress\"):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"browser.serverAddress\")))\n ]\n\n elif (\n config.is_manually_set(\"server.address\") and not server_address_is_unix_socket()\n ):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"server.address\"))),\n ]\n\n elif config.get_option(\"server.headless\"):\n internal_ip = net_util.get_internal_ip()\n if internal_ip:\n named_urls.append((\"Network URL\", Report.get_url(internal_ip)))\n\n external_ip = net_util.get_external_ip()\n if external_ip:\n named_urls.append((\"External URL\", Report.get_url(external_ip)))\n\n else:\n named_urls = [\n (\"Local URL\", Report.get_url(\"localhost\")),\n ]\n\n internal_ip = net_util.get_internal_ip()\n if internal_ip:\n named_urls.append((\"Network URL\", Report.get_url(internal_ip)))\n\n click.secho(\"\")\n click.secho(\" %s\" % title_message, fg=\"blue\", bold=True)\n click.secho(\"\")\n\n for url_name, url in named_urls:\n url_util.print_url(url_name, url)\n\n click.secho(\"\")\n\n if is_running_hello:\n click.secho(\" Ready to create your own Python apps super quickly?\")\n click.secho(\" Head over to \", nl=False)\n click.secho(\"https://docs.streamlit.io\", bold=True)\n click.secho(\"\")\n click.secho(\" May you create awesome apps!\")\n click.secho(\"\")\n click.secho(\"\")\n\n\ndef _maybe_print_old_git_warning(script_path: str) -> None:\n \"\"\"If our script is running in a Git repo, and we're running a very old\n Git version, print a warning that Git integration will be unavailable.\n \"\"\"\n repo = GitRepo(script_path)\n if (\n not repo.is_valid()\n and repo.git_version is not None\n and repo.git_version < MIN_GIT_VERSION\n ):\n git_version_string = \".\".join(str(val) for val in repo.git_version)\n min_version_string = \".\".join(str(val) for val in MIN_GIT_VERSION)\n click.secho(\"\")\n click.secho(\" Git integration is disabled.\", fg=\"yellow\", bold=True)\n click.secho(\"\")\n click.secho(\n f\" Streamlit requires Git {min_version_string} or later, \"\n f\"but you have {git_version_string}.\",\n fg=\"yellow\",\n )\n click.secho(\n \" Git is used by Streamlit Sharing (https://streamlit.io/sharing).\",\n fg=\"yellow\",\n )\n click.secho(\" To enable this feature, please update Git.\", fg=\"yellow\")\n\n\ndef load_config_options(flag_options: Dict[str, Any]):\n \"\"\"Load config options from config.toml files, then overlay the ones set by\n flag_options.\n\n The \"streamlit run\" command supports passing Streamlit's config options\n as flags. This function reads through the config options set via flag,\n massages them, and passes them to get_config_options() so that they\n overwrite config option defaults and those loaded from config.toml files.\n\n Parameters\n ----------\n flag_options : Dict[str, Any]\n A dict of config options where the keys are the CLI flag version of the\n config option names.\n \"\"\"\n options_from_flags = {\n name.replace(\"_\", \".\"): val\n for name, val in flag_options.items()\n if val is not None\n }\n\n # Force a reparse of config files (if they exist). 
The result is cached\n # for future calls.\n config.get_config_options(force_reparse=True, options_from_flags=options_from_flags)\n\n\ndef _install_config_watchers(flag_options: Dict[str, Any]):\n def on_config_changed(_path):\n load_config_options(flag_options)\n\n for filename in CONFIG_FILENAMES:\n if os.path.exists(filename):\n watch_file(filename, on_config_changed)\n\n\ndef run(script_path, command_line, args, flag_options):\n \"\"\"Run a script in a separate thread and start a server for the app.\n\n This starts a blocking ioloop.\n\n Parameters\n ----------\n script_path : str\n command_line : str\n args : [str]\n flag_options : Dict[str, Any]\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n _fix_pydeck_mapbox_api_warning()\n _install_config_watchers(flag_options)\n\n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n _set_up_signal_handler()\n\n ioloop = tornado.ioloop.IOLoop.current()\n\n # Create and start the server.\n server = Server(ioloop, script_path, command_line)\n server.start(_on_server_start)\n\n # (Must come after start(), because this starts a new thread and start()\n # may call sys.exit() which doesn't kill other threads.\n server.add_preheated_report_session()\n\n # Start the ioloop. This function will not return until the\n # server is shut down.\n ioloop.start()\n", "from datetime import date, datetime, timedelta\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import iNaT, period as libperiod\nfrom pandas._libs.tslibs.ccalendar import DAYS, MONTHS\nfrom pandas._libs.tslibs.parsing import DateParseError\nfrom pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG, IncompatibleFrequency\nfrom pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz\nfrom pandas.compat.numpy import np_datetime64_compat\n\nimport pandas as pd\nfrom pandas import NaT, Period, Timedelta, Timestamp, offsets\nimport pandas._testing as tm\n\n\nclass TestPeriodConstruction:\n def test_construction(self):\n i1 = Period(\"1/1/2005\", freq=\"M\")\n i2 = Period(\"Jan 2005\")\n\n assert i1 == i2\n\n i1 = Period(\"2005\", freq=\"A\")\n i2 = Period(\"2005\")\n i3 = Period(\"2005\", freq=\"a\")\n\n assert i1 == i2\n assert i1 == i3\n\n i4 = Period(\"2005\", freq=\"M\")\n i5 = Period(\"2005\", freq=\"m\")\n\n msg = r\"Input has different freq=M from Period\\(freq=A-DEC\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n i1 != i4\n assert i4 == i5\n\n i1 = Period.now(\"Q\")\n i2 = Period(datetime.now(), freq=\"Q\")\n i3 = Period.now(\"q\")\n\n assert i1 == i2\n assert i1 == i3\n\n i1 = Period(\"1982\", freq=\"min\")\n i2 = Period(\"1982\", freq=\"MIN\")\n assert i1 == i2\n\n i1 = Period(year=2005, month=3, day=1, freq=\"D\")\n i2 = Period(\"3/1/2005\", freq=\"D\")\n assert i1 == i2\n\n i3 = Period(year=2005, month=3, day=1, freq=\"d\")\n assert i1 == i3\n\n i1 = Period(\"2007-01-01 09:00:00.001\")\n expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq=\"L\")\n assert i1 == expected\n\n expected = Period(np_datetime64_compat(\"2007-01-01 09:00:00.001Z\"), freq=\"L\")\n assert i1 == expected\n\n i1 = Period(\"2007-01-01 09:00:00.00101\")\n expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq=\"U\")\n assert i1 == expected\n\n expected = Period(np_datetime64_compat(\"2007-01-01 09:00:00.00101Z\"), freq=\"U\")\n assert i1 == expected\n\n msg = \"Must supply freq for ordinal value\"\n with pytest.raises(ValueError, match=msg):\n 
Period(ordinal=200701)\n\n msg = \"Invalid frequency: X\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2007-1-1\", freq=\"X\")\n\n # GH#34703 tuple freq disallowed\n with pytest.raises(TypeError, match=\"pass as a string instead\"):\n Period(\"1982\", freq=(\"Min\", 1))\n\n def test_construction_bday(self):\n\n # Biz day construction, roll forward if non-weekday\n i1 = Period(\"3/10/12\", freq=\"B\")\n i2 = Period(\"3/10/12\", freq=\"D\")\n assert i1 == i2.asfreq(\"B\")\n i2 = Period(\"3/11/12\", freq=\"D\")\n assert i1 == i2.asfreq(\"B\")\n i2 = Period(\"3/12/12\", freq=\"D\")\n assert i1 == i2.asfreq(\"B\")\n\n i3 = Period(\"3/10/12\", freq=\"b\")\n assert i1 == i3\n\n i1 = Period(year=2012, month=3, day=10, freq=\"B\")\n i2 = Period(\"3/12/12\", freq=\"B\")\n assert i1 == i2\n\n def test_construction_quarter(self):\n\n i1 = Period(year=2005, quarter=1, freq=\"Q\")\n i2 = Period(\"1/1/2005\", freq=\"Q\")\n assert i1 == i2\n\n i1 = Period(year=2005, quarter=3, freq=\"Q\")\n i2 = Period(\"9/1/2005\", freq=\"Q\")\n assert i1 == i2\n\n i1 = Period(\"2005Q1\")\n i2 = Period(year=2005, quarter=1, freq=\"Q\")\n i3 = Period(\"2005q1\")\n assert i1 == i2\n assert i1 == i3\n\n i1 = Period(\"05Q1\")\n assert i1 == i2\n lower = Period(\"05q1\")\n assert i1 == lower\n\n i1 = Period(\"1Q2005\")\n assert i1 == i2\n lower = Period(\"1q2005\")\n assert i1 == lower\n\n i1 = Period(\"1Q05\")\n assert i1 == i2\n lower = Period(\"1q05\")\n assert i1 == lower\n\n i1 = Period(\"4Q1984\")\n assert i1.year == 1984\n lower = Period(\"4q1984\")\n assert i1 == lower\n\n def test_construction_month(self):\n\n expected = Period(\"2007-01\", freq=\"M\")\n i1 = Period(\"200701\", freq=\"M\")\n assert i1 == expected\n\n i1 = Period(\"200701\", freq=\"M\")\n assert i1 == expected\n\n i1 = Period(200701, freq=\"M\")\n assert i1 == expected\n\n i1 = Period(ordinal=200701, freq=\"M\")\n assert i1.year == 18695\n\n i1 = Period(datetime(2007, 1, 1), freq=\"M\")\n i2 = Period(\"200701\", freq=\"M\")\n assert i1 == i2\n\n i1 = Period(date(2007, 1, 1), freq=\"M\")\n i2 = Period(datetime(2007, 1, 1), freq=\"M\")\n i3 = Period(np.datetime64(\"2007-01-01\"), freq=\"M\")\n i4 = Period(np_datetime64_compat(\"2007-01-01 00:00:00Z\"), freq=\"M\")\n i5 = Period(np_datetime64_compat(\"2007-01-01 00:00:00.000Z\"), freq=\"M\")\n assert i1 == i2\n assert i1 == i3\n assert i1 == i4\n assert i1 == i5\n\n def test_period_constructor_offsets(self):\n assert Period(\"1/1/2005\", freq=offsets.MonthEnd()) == Period(\n \"1/1/2005\", freq=\"M\"\n )\n assert Period(\"2005\", freq=offsets.YearEnd()) == Period(\"2005\", freq=\"A\")\n assert Period(\"2005\", freq=offsets.MonthEnd()) == Period(\"2005\", freq=\"M\")\n assert Period(\"3/10/12\", freq=offsets.BusinessDay()) == Period(\n \"3/10/12\", freq=\"B\"\n )\n assert Period(\"3/10/12\", freq=offsets.Day()) == Period(\"3/10/12\", freq=\"D\")\n\n assert Period(\n year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)\n ) == Period(year=2005, quarter=1, freq=\"Q\")\n assert Period(\n year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)\n ) == Period(year=2005, quarter=2, freq=\"Q\")\n\n assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(\n year=2005, month=3, day=1, freq=\"D\"\n )\n assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(\n year=2012, month=3, day=10, freq=\"B\"\n )\n\n expected = Period(\"2005-03-01\", freq=\"3D\")\n assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected\n assert Period(year=2005, 
month=3, day=1, freq=\"3D\") == expected\n\n assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(\n year=2012, month=3, day=10, freq=\"3B\"\n )\n\n assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq=\"M\")\n\n i1 = Period(ordinal=200701, freq=offsets.MonthEnd())\n i2 = Period(ordinal=200701, freq=\"M\")\n assert i1 == i2\n assert i1.year == 18695\n assert i2.year == 18695\n\n i1 = Period(datetime(2007, 1, 1), freq=\"M\")\n i2 = Period(\"200701\", freq=\"M\")\n assert i1 == i2\n\n i1 = Period(date(2007, 1, 1), freq=\"M\")\n i2 = Period(datetime(2007, 1, 1), freq=\"M\")\n i3 = Period(np.datetime64(\"2007-01-01\"), freq=\"M\")\n i4 = Period(np_datetime64_compat(\"2007-01-01 00:00:00Z\"), freq=\"M\")\n i5 = Period(np_datetime64_compat(\"2007-01-01 00:00:00.000Z\"), freq=\"M\")\n assert i1 == i2\n assert i1 == i3\n assert i1 == i4\n assert i1 == i5\n\n i1 = Period(\"2007-01-01 09:00:00.001\")\n expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq=\"L\")\n assert i1 == expected\n\n expected = Period(np_datetime64_compat(\"2007-01-01 09:00:00.001Z\"), freq=\"L\")\n assert i1 == expected\n\n i1 = Period(\"2007-01-01 09:00:00.00101\")\n expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq=\"U\")\n assert i1 == expected\n\n expected = Period(np_datetime64_compat(\"2007-01-01 09:00:00.00101Z\"), freq=\"U\")\n assert i1 == expected\n\n def test_invalid_arguments(self):\n msg = \"Must supply freq for datetime value\"\n with pytest.raises(ValueError, match=msg):\n Period(datetime.now())\n with pytest.raises(ValueError, match=msg):\n Period(datetime.now().date())\n\n msg = \"Value must be Period, string, integer, or datetime\"\n with pytest.raises(ValueError, match=msg):\n Period(1.6, freq=\"D\")\n msg = \"Ordinal must be an integer\"\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1.6, freq=\"D\")\n msg = \"Only value or ordinal but not both should be given but not both\"\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=2, value=1, freq=\"D\")\n\n msg = \"If value is None, freq cannot be None\"\n with pytest.raises(ValueError, match=msg):\n Period(month=1)\n\n msg = \"Given date string not likely a datetime\"\n with pytest.raises(ValueError, match=msg):\n Period(\"-2000\", \"A\")\n msg = \"day is out of range for month\"\n with pytest.raises(DateParseError, match=msg):\n Period(\"0\", \"A\")\n msg = \"Unknown datetime string format, unable to parse\"\n with pytest.raises(DateParseError, match=msg):\n Period(\"1/1/-2000\", \"A\")\n\n def test_constructor_corner(self):\n expected = Period(\"2007-01\", freq=\"2M\")\n assert Period(year=2007, month=1, freq=\"2M\") == expected\n\n assert Period(None) is NaT\n\n p = Period(\"2007-01-01\", freq=\"D\")\n\n result = Period(p, freq=\"A\")\n exp = Period(\"2007\", freq=\"A\")\n assert result == exp\n\n def test_constructor_infer_freq(self):\n p = Period(\"2007-01-01\")\n assert p.freq == \"D\"\n\n p = Period(\"2007-01-01 07\")\n assert p.freq == \"H\"\n\n p = Period(\"2007-01-01 07:10\")\n assert p.freq == \"T\"\n\n p = Period(\"2007-01-01 07:10:15\")\n assert p.freq == \"S\"\n\n p = Period(\"2007-01-01 07:10:15.123\")\n assert p.freq == \"L\"\n\n p = Period(\"2007-01-01 07:10:15.123000\")\n assert p.freq == \"L\"\n\n p = Period(\"2007-01-01 07:10:15.123400\")\n assert p.freq == \"U\"\n\n def test_multiples(self):\n result1 = Period(\"1989\", freq=\"2A\")\n result2 = Period(\"1989\", freq=\"A\")\n assert result1.ordinal == result2.ordinal\n assert result1.freqstr == \"2A-DEC\"\n 
assert result2.freqstr == \"A-DEC\"\n assert result1.freq == offsets.YearEnd(2)\n assert result2.freq == offsets.YearEnd()\n\n assert (result1 + 1).ordinal == result1.ordinal + 2\n assert (1 + result1).ordinal == result1.ordinal + 2\n assert (result1 - 1).ordinal == result2.ordinal - 2\n assert (-1 + result1).ordinal == result2.ordinal - 2\n\n @pytest.mark.parametrize(\"month\", MONTHS)\n def test_period_cons_quarterly(self, month):\n # bugs in scikits.timeseries\n freq = f\"Q-{month}\"\n exp = Period(\"1989Q3\", freq=freq)\n assert \"1989Q3\" in str(exp)\n stamp = exp.to_timestamp(\"D\", how=\"end\")\n p = Period(stamp, freq=freq)\n assert p == exp\n\n stamp = exp.to_timestamp(\"3D\", how=\"end\")\n p = Period(stamp, freq=freq)\n assert p == exp\n\n @pytest.mark.parametrize(\"month\", MONTHS)\n def test_period_cons_annual(self, month):\n # bugs in scikits.timeseries\n freq = f\"A-{month}\"\n exp = Period(\"1989\", freq=freq)\n stamp = exp.to_timestamp(\"D\", how=\"end\") + timedelta(days=30)\n p = Period(stamp, freq=freq)\n\n assert p == exp + 1\n assert isinstance(p, Period)\n\n @pytest.mark.parametrize(\"day\", DAYS)\n @pytest.mark.parametrize(\"num\", range(10, 17))\n def test_period_cons_weekly(self, num, day):\n daystr = f\"2011-02-{num}\"\n freq = f\"W-{day}\"\n\n result = Period(daystr, freq=freq)\n expected = Period(daystr, freq=\"D\").asfreq(freq)\n assert result == expected\n assert isinstance(result, Period)\n\n def test_period_from_ordinal(self):\n p = Period(\"2011-01\", freq=\"M\")\n res = Period._from_ordinal(p.ordinal, freq=\"M\")\n assert p == res\n assert isinstance(res, Period)\n\n @pytest.mark.parametrize(\"freq\", [\"A\", \"M\", \"D\", \"H\"])\n def test_construct_from_nat_string_and_freq(self, freq):\n per = Period(\"NaT\", freq=freq)\n assert per is NaT\n\n per = Period(\"NaT\", freq=\"2\" + freq)\n assert per is NaT\n\n per = Period(\"NaT\", freq=\"3\" + freq)\n assert per is NaT\n\n def test_period_cons_nat(self):\n p = Period(\"nat\", freq=\"W-SUN\")\n assert p is NaT\n\n p = Period(iNaT, freq=\"D\")\n assert p is NaT\n\n p = Period(iNaT, freq=\"3D\")\n assert p is NaT\n\n p = Period(iNaT, freq=\"1D1H\")\n assert p is NaT\n\n p = Period(\"NaT\")\n assert p is NaT\n\n p = Period(iNaT)\n assert p is NaT\n\n def test_period_cons_mult(self):\n p1 = Period(\"2011-01\", freq=\"3M\")\n p2 = Period(\"2011-01\", freq=\"M\")\n assert p1.ordinal == p2.ordinal\n\n assert p1.freq == offsets.MonthEnd(3)\n assert p1.freqstr == \"3M\"\n\n assert p2.freq == offsets.MonthEnd()\n assert p2.freqstr == \"M\"\n\n result = p1 + 1\n assert result.ordinal == (p2 + 3).ordinal\n\n assert result.freq == p1.freq\n assert result.freqstr == \"3M\"\n\n result = p1 - 1\n assert result.ordinal == (p2 - 3).ordinal\n assert result.freq == p1.freq\n assert result.freqstr == \"3M\"\n\n msg = \"Frequency must be positive, because it represents span: -3M\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"-3M\")\n\n msg = \"Frequency must be positive, because it represents span: 0M\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"0M\")\n\n def test_period_cons_combined(self):\n p = [\n (\n Period(\"2011-01\", freq=\"1D1H\"),\n Period(\"2011-01\", freq=\"1H1D\"),\n Period(\"2011-01\", freq=\"H\"),\n ),\n (\n Period(ordinal=1, freq=\"1D1H\"),\n Period(ordinal=1, freq=\"1H1D\"),\n Period(ordinal=1, freq=\"H\"),\n ),\n ]\n\n for p1, p2, p3 in p:\n assert p1.ordinal == p3.ordinal\n assert p2.ordinal == p3.ordinal\n\n assert p1.freq == offsets.Hour(25)\n 
assert p1.freqstr == \"25H\"\n\n assert p2.freq == offsets.Hour(25)\n assert p2.freqstr == \"25H\"\n\n assert p3.freq == offsets.Hour()\n assert p3.freqstr == \"H\"\n\n result = p1 + 1\n assert result.ordinal == (p3 + 25).ordinal\n assert result.freq == p1.freq\n assert result.freqstr == \"25H\"\n\n result = p2 + 1\n assert result.ordinal == (p3 + 25).ordinal\n assert result.freq == p2.freq\n assert result.freqstr == \"25H\"\n\n result = p1 - 1\n assert result.ordinal == (p3 - 25).ordinal\n assert result.freq == p1.freq\n assert result.freqstr == \"25H\"\n\n result = p2 - 1\n assert result.ordinal == (p3 - 25).ordinal\n assert result.freq == p2.freq\n assert result.freqstr == \"25H\"\n\n msg = \"Frequency must be positive, because it represents span: -25H\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"-1D1H\")\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"-1H1D\")\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1, freq=\"-1D1H\")\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1, freq=\"-1H1D\")\n\n msg = \"Frequency must be positive, because it represents span: 0D\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"0D0H\")\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1, freq=\"0D0H\")\n\n # You can only combine together day and intraday offsets\n msg = \"Invalid frequency: 1W1D\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"1W1D\")\n msg = \"Invalid frequency: 1D1W\"\n with pytest.raises(ValueError, match=msg):\n Period(\"2011-01\", freq=\"1D1W\")\n\n @pytest.mark.parametrize(\"hour\", range(24))\n def test_period_large_ordinal(self, hour):\n # Issue #36430\n # Integer overflow for Period over the maximum timestamp\n p = pd.Period(ordinal=2562048 + hour, freq=\"1H\")\n assert p.hour == hour\n\n\nclass TestPeriodMethods:\n def test_round_trip(self):\n p = Period(\"2000Q1\")\n new_p = tm.round_trip_pickle(p)\n assert new_p == p\n\n def test_hash(self):\n assert hash(Period(\"2011-01\", freq=\"M\")) == hash(Period(\"2011-01\", freq=\"M\"))\n\n assert hash(Period(\"2011-01-01\", freq=\"D\")) != hash(Period(\"2011-01\", freq=\"M\"))\n\n assert hash(Period(\"2011-01\", freq=\"3M\")) != hash(Period(\"2011-01\", freq=\"2M\"))\n\n assert hash(Period(\"2011-01\", freq=\"M\")) != hash(Period(\"2011-02\", freq=\"M\"))\n\n # --------------------------------------------------------------\n # to_timestamp\n\n @pytest.mark.parametrize(\"tzstr\", [\"Europe/Brussels\", \"Asia/Tokyo\", \"US/Pacific\"])\n def test_to_timestamp_tz_arg(self, tzstr):\n # GH#34522 tz kwarg deprecated\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"M\").to_timestamp(tz=tzstr)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n exp_zone = pytz.timezone(tzstr).normalize(p)\n\n assert p == exp\n assert p.tz == exp_zone.tzinfo\n assert p.tz == exp.tz\n\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"3H\").to_timestamp(tz=tzstr)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n exp_zone = pytz.timezone(tzstr).normalize(p)\n\n assert p == exp\n assert p.tz == exp_zone.tzinfo\n assert p.tz == exp.tz\n\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"A\").to_timestamp(freq=\"A\", tz=tzstr)\n exp = Timestamp(\"31/12/2005\", tz=\"UTC\").tz_convert(tzstr)\n exp_zone = pytz.timezone(tzstr).normalize(p)\n\n assert p == exp\n assert p.tz == exp_zone.tzinfo\n 
assert p.tz == exp.tz\n\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"A\").to_timestamp(freq=\"3H\", tz=tzstr)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n exp_zone = pytz.timezone(tzstr).normalize(p)\n\n assert p == exp\n assert p.tz == exp_zone.tzinfo\n assert p.tz == exp.tz\n\n @pytest.mark.parametrize(\n \"tzstr\",\n [\"dateutil/Europe/Brussels\", \"dateutil/Asia/Tokyo\", \"dateutil/US/Pacific\"],\n )\n def test_to_timestamp_tz_arg_dateutil(self, tzstr):\n tz = maybe_get_tz(tzstr)\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"M\").to_timestamp(tz=tz)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n assert p == exp\n assert p.tz == dateutil_gettz(tzstr.split(\"/\", 1)[1])\n assert p.tz == exp.tz\n\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"M\").to_timestamp(freq=\"3H\", tz=tz)\n exp = Timestamp(\"1/1/2005\", tz=\"UTC\").tz_convert(tzstr)\n assert p == exp\n assert p.tz == dateutil_gettz(tzstr.split(\"/\", 1)[1])\n assert p.tz == exp.tz\n\n def test_to_timestamp_tz_arg_dateutil_from_string(self):\n with tm.assert_produces_warning(FutureWarning):\n p = Period(\"1/1/2005\", freq=\"M\").to_timestamp(tz=\"dateutil/Europe/Brussels\")\n assert p.tz == dateutil_gettz(\"Europe/Brussels\")\n\n def test_to_timestamp_mult(self):\n p = Period(\"2011-01\", freq=\"M\")\n assert p.to_timestamp(how=\"S\") == Timestamp(\"2011-01-01\")\n expected = Timestamp(\"2011-02-01\") - Timedelta(1, \"ns\")\n assert p.to_timestamp(how=\"E\") == expected\n\n p = Period(\"2011-01\", freq=\"3M\")\n assert p.to_timestamp(how=\"S\") == Timestamp(\"2011-01-01\")\n expected = Timestamp(\"2011-04-01\") - Timedelta(1, \"ns\")\n assert p.to_timestamp(how=\"E\") == expected\n\n def test_to_timestamp(self):\n p = Period(\"1982\", freq=\"A\")\n start_ts = p.to_timestamp(how=\"S\")\n aliases = [\"s\", \"StarT\", \"BEGIn\"]\n for a in aliases:\n assert start_ts == p.to_timestamp(\"D\", how=a)\n # freq with mult should not affect to the result\n assert start_ts == p.to_timestamp(\"3D\", how=a)\n\n end_ts = p.to_timestamp(how=\"E\")\n aliases = [\"e\", \"end\", \"FINIsH\"]\n for a in aliases:\n assert end_ts == p.to_timestamp(\"D\", how=a)\n assert end_ts == p.to_timestamp(\"3D\", how=a)\n\n from_lst = [\"A\", \"Q\", \"M\", \"W\", \"B\", \"D\", \"H\", \"Min\", \"S\"]\n\n def _ex(p):\n if p.freq == \"B\":\n return p.start_time + Timedelta(days=1, nanoseconds=-1)\n return Timestamp((p + p.freq).start_time.value - 1)\n\n for i, fcode in enumerate(from_lst):\n p = Period(\"1982\", freq=fcode)\n result = p.to_timestamp().to_period(fcode)\n assert result == p\n\n assert p.start_time == p.to_timestamp(how=\"S\")\n\n assert p.end_time == _ex(p)\n\n # Frequency other than daily\n\n p = Period(\"1985\", freq=\"A\")\n\n result = p.to_timestamp(\"H\", how=\"end\")\n expected = Timestamp(1986, 1, 1) - Timedelta(1, \"ns\")\n assert result == expected\n result = p.to_timestamp(\"3H\", how=\"end\")\n assert result == expected\n\n result = p.to_timestamp(\"T\", how=\"end\")\n expected = Timestamp(1986, 1, 1) - Timedelta(1, \"ns\")\n assert result == expected\n result = p.to_timestamp(\"2T\", how=\"end\")\n assert result == expected\n\n result = p.to_timestamp(how=\"end\")\n expected = Timestamp(1986, 1, 1) - Timedelta(1, \"ns\")\n assert result == expected\n\n expected = datetime(1985, 1, 1)\n result = p.to_timestamp(\"H\", how=\"start\")\n assert result == expected\n result = p.to_timestamp(\"T\", 
how=\"start\")\n assert result == expected\n result = p.to_timestamp(\"S\", how=\"start\")\n assert result == expected\n result = p.to_timestamp(\"3H\", how=\"start\")\n assert result == expected\n result = p.to_timestamp(\"5S\", how=\"start\")\n assert result == expected\n\n def test_to_timestamp_business_end(self):\n per = pd.Period(\"1990-01-05\", \"B\") # Friday\n result = per.to_timestamp(\"B\", how=\"E\")\n\n expected = pd.Timestamp(\"1990-01-06\") - pd.Timedelta(nanoseconds=1)\n assert result == expected\n\n @pytest.mark.parametrize(\n \"ts, expected\",\n [\n (\"1970-01-01 00:00:00\", 0),\n (\"1970-01-01 00:00:00.000001\", 1),\n (\"1970-01-01 00:00:00.00001\", 10),\n (\"1970-01-01 00:00:00.499\", 499000),\n (\"1999-12-31 23:59:59.999\", 999000),\n (\"1999-12-31 23:59:59.999999\", 999999),\n (\"2050-12-31 23:59:59.5\", 500000),\n (\"2050-12-31 23:59:59.500001\", 500001),\n (\"2050-12-31 23:59:59.123456\", 123456),\n ],\n )\n @pytest.mark.parametrize(\"freq\", [None, \"us\", \"ns\"])\n def test_to_timestamp_microsecond(self, ts, expected, freq):\n # GH 24444\n result = Period(ts).to_timestamp(freq=freq).microsecond\n assert result == expected\n\n # --------------------------------------------------------------\n # Rendering: __repr__, strftime, etc\n\n def test_repr(self):\n p = Period(\"Jan-2000\")\n assert \"2000-01\" in repr(p)\n\n p = Period(\"2000-12-15\")\n assert \"2000-12-15\" in repr(p)\n\n def test_repr_nat(self):\n p = Period(\"nat\", freq=\"M\")\n assert repr(NaT) in repr(p)\n\n def test_millisecond_repr(self):\n p = Period(\"2000-01-01 12:15:02.123\")\n\n assert repr(p) == \"Period('2000-01-01 12:15:02.123', 'L')\"\n\n def test_microsecond_repr(self):\n p = Period(\"2000-01-01 12:15:02.123567\")\n\n assert repr(p) == \"Period('2000-01-01 12:15:02.123567', 'U')\"\n\n def test_strftime(self):\n # GH#3363\n p = Period(\"2000-1-1 12:34:12\", freq=\"S\")\n res = p.strftime(\"%Y-%m-%d %H:%M:%S\")\n assert res == \"2000-01-01 12:34:12\"\n assert isinstance(res, str)\n\n\nclass TestPeriodProperties:\n \"\"\"Test properties such as year, month, weekday, etc....\"\"\"\n\n @pytest.mark.parametrize(\"freq\", [\"A\", \"M\", \"D\", \"H\"])\n def test_is_leap_year(self, freq):\n # GH 13727\n p = Period(\"2000-01-01 00:00:00\", freq=freq)\n assert p.is_leap_year\n assert isinstance(p.is_leap_year, bool)\n\n p = Period(\"1999-01-01 00:00:00\", freq=freq)\n assert not p.is_leap_year\n\n p = Period(\"2004-01-01 00:00:00\", freq=freq)\n assert p.is_leap_year\n\n p = Period(\"2100-01-01 00:00:00\", freq=freq)\n assert not p.is_leap_year\n\n def test_quarterly_negative_ordinals(self):\n p = Period(ordinal=-1, freq=\"Q-DEC\")\n assert p.year == 1969\n assert p.quarter == 4\n assert isinstance(p, Period)\n\n p = Period(ordinal=-2, freq=\"Q-DEC\")\n assert p.year == 1969\n assert p.quarter == 3\n assert isinstance(p, Period)\n\n p = Period(ordinal=-2, freq=\"M\")\n assert p.year == 1969\n assert p.month == 11\n assert isinstance(p, Period)\n\n def test_freq_str(self):\n i1 = Period(\"1982\", freq=\"Min\")\n assert i1.freq == offsets.Minute()\n assert i1.freqstr == \"T\"\n\n def test_period_deprecated_freq(self):\n cases = {\n \"M\": [\"MTH\", \"MONTH\", \"MONTHLY\", \"Mth\", \"month\", \"monthly\"],\n \"B\": [\"BUS\", \"BUSINESS\", \"BUSINESSLY\", \"WEEKDAY\", \"bus\"],\n \"D\": [\"DAY\", \"DLY\", \"DAILY\", \"Day\", \"Dly\", \"Daily\"],\n \"H\": [\"HR\", \"HOUR\", \"HRLY\", \"HOURLY\", \"hr\", \"Hour\", \"HRly\"],\n \"T\": [\"minute\", \"MINUTE\", \"MINUTELY\", \"minutely\"],\n \"S\": [\"sec\", 
\"SEC\", \"SECOND\", \"SECONDLY\", \"second\"],\n \"L\": [\"MILLISECOND\", \"MILLISECONDLY\", \"millisecond\"],\n \"U\": [\"MICROSECOND\", \"MICROSECONDLY\", \"microsecond\"],\n \"N\": [\"NANOSECOND\", \"NANOSECONDLY\", \"nanosecond\"],\n }\n\n msg = INVALID_FREQ_ERR_MSG\n for exp, freqs in cases.items():\n for freq in freqs:\n with pytest.raises(ValueError, match=msg):\n Period(\"2016-03-01 09:00\", freq=freq)\n with pytest.raises(ValueError, match=msg):\n Period(ordinal=1, freq=freq)\n\n # check supported freq-aliases still works\n p1 = Period(\"2016-03-01 09:00\", freq=exp)\n p2 = Period(ordinal=1, freq=exp)\n assert isinstance(p1, Period)\n assert isinstance(p2, Period)\n\n def test_start_time(self):\n freq_lst = [\"A\", \"Q\", \"M\", \"D\", \"H\", \"T\", \"S\"]\n xp = datetime(2012, 1, 1)\n for f in freq_lst:\n p = Period(\"2012\", freq=f)\n assert p.start_time == xp\n assert Period(\"2012\", freq=\"B\").start_time == datetime(2012, 1, 2)\n assert Period(\"2012\", freq=\"W\").start_time == datetime(2011, 12, 26)\n\n def test_end_time(self):\n p = Period(\"2012\", freq=\"A\")\n\n def _ex(*args):\n return Timestamp(Timestamp(datetime(*args)).value - 1)\n\n xp = _ex(2013, 1, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"Q\")\n xp = _ex(2012, 4, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"M\")\n xp = _ex(2012, 2, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"D\")\n xp = _ex(2012, 1, 2)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"H\")\n xp = _ex(2012, 1, 1, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"B\")\n xp = _ex(2012, 1, 3)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"W\")\n xp = _ex(2012, 1, 2)\n assert xp == p.end_time\n\n # Test for GH 11738\n p = Period(\"2012\", freq=\"15D\")\n xp = _ex(2012, 1, 16)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"1D1H\")\n xp = _ex(2012, 1, 2, 1)\n assert xp == p.end_time\n\n p = Period(\"2012\", freq=\"1H1D\")\n xp = _ex(2012, 1, 2, 1)\n assert xp == p.end_time\n\n def test_end_time_business_friday(self):\n # GH#34449\n per = Period(\"1990-01-05\", \"B\")\n result = per.end_time\n\n expected = pd.Timestamp(\"1990-01-06\") - pd.Timedelta(nanoseconds=1)\n assert result == expected\n\n def test_anchor_week_end_time(self):\n def _ex(*args):\n return Timestamp(Timestamp(datetime(*args)).value - 1)\n\n p = Period(\"2013-1-1\", \"W-SAT\")\n xp = _ex(2013, 1, 6)\n assert p.end_time == xp\n\n def test_properties_annually(self):\n # Test properties on Periods with annually frequency.\n a_date = Period(freq=\"A\", year=2007)\n assert a_date.year == 2007\n\n def test_properties_quarterly(self):\n # Test properties on Periods with daily frequency.\n qedec_date = Period(freq=\"Q-DEC\", year=2007, quarter=1)\n qejan_date = Period(freq=\"Q-JAN\", year=2007, quarter=1)\n qejun_date = Period(freq=\"Q-JUN\", year=2007, quarter=1)\n #\n for x in range(3):\n for qd in (qedec_date, qejan_date, qejun_date):\n assert (qd + x).qyear == 2007\n assert (qd + x).quarter == x + 1\n\n def test_properties_monthly(self):\n # Test properties on Periods with daily frequency.\n m_date = Period(freq=\"M\", year=2007, month=1)\n for x in range(11):\n m_ival_x = m_date + x\n assert m_ival_x.year == 2007\n if 1 <= x + 1 <= 3:\n assert m_ival_x.quarter == 1\n elif 4 <= x + 1 <= 6:\n assert m_ival_x.quarter == 2\n elif 7 <= x + 1 <= 9:\n assert m_ival_x.quarter == 3\n elif 10 <= x + 1 <= 12:\n assert m_ival_x.quarter == 4\n assert m_ival_x.month == x + 1\n\n def 
test_properties_weekly(self):\n # Test properties on Periods with daily frequency.\n w_date = Period(freq=\"W\", year=2007, month=1, day=7)\n #\n assert w_date.year == 2007\n assert w_date.quarter == 1\n assert w_date.month == 1\n assert w_date.week == 1\n assert (w_date - 1).week == 52\n assert w_date.days_in_month == 31\n assert Period(freq=\"W\", year=2012, month=2, day=1).days_in_month == 29\n\n def test_properties_weekly_legacy(self):\n # Test properties on Periods with daily frequency.\n w_date = Period(freq=\"W\", year=2007, month=1, day=7)\n assert w_date.year == 2007\n assert w_date.quarter == 1\n assert w_date.month == 1\n assert w_date.week == 1\n assert (w_date - 1).week == 52\n assert w_date.days_in_month == 31\n\n exp = Period(freq=\"W\", year=2012, month=2, day=1)\n assert exp.days_in_month == 29\n\n msg = INVALID_FREQ_ERR_MSG\n with pytest.raises(ValueError, match=msg):\n Period(freq=\"WK\", year=2007, month=1, day=7)\n\n def test_properties_daily(self):\n # Test properties on Periods with daily frequency.\n b_date = Period(freq=\"B\", year=2007, month=1, day=1)\n #\n assert b_date.year == 2007\n assert b_date.quarter == 1\n assert b_date.month == 1\n assert b_date.day == 1\n assert b_date.weekday == 0\n assert b_date.dayofyear == 1\n assert b_date.days_in_month == 31\n assert Period(freq=\"B\", year=2012, month=2, day=1).days_in_month == 29\n\n d_date = Period(freq=\"D\", year=2007, month=1, day=1)\n\n assert d_date.year == 2007\n assert d_date.quarter == 1\n assert d_date.month == 1\n assert d_date.day == 1\n assert d_date.weekday == 0\n assert d_date.dayofyear == 1\n assert d_date.days_in_month == 31\n assert Period(freq=\"D\", year=2012, month=2, day=1).days_in_month == 29\n\n def test_properties_hourly(self):\n # Test properties on Periods with hourly frequency.\n h_date1 = Period(freq=\"H\", year=2007, month=1, day=1, hour=0)\n h_date2 = Period(freq=\"2H\", year=2007, month=1, day=1, hour=0)\n\n for h_date in [h_date1, h_date2]:\n assert h_date.year == 2007\n assert h_date.quarter == 1\n assert h_date.month == 1\n assert h_date.day == 1\n assert h_date.weekday == 0\n assert h_date.dayofyear == 1\n assert h_date.hour == 0\n assert h_date.days_in_month == 31\n assert (\n Period(freq=\"H\", year=2012, month=2, day=1, hour=0).days_in_month == 29\n )\n\n def test_properties_minutely(self):\n # Test properties on Periods with minutely frequency.\n t_date = Period(freq=\"Min\", year=2007, month=1, day=1, hour=0, minute=0)\n #\n assert t_date.quarter == 1\n assert t_date.month == 1\n assert t_date.day == 1\n assert t_date.weekday == 0\n assert t_date.dayofyear == 1\n assert t_date.hour == 0\n assert t_date.minute == 0\n assert t_date.days_in_month == 31\n assert (\n Period(freq=\"D\", year=2012, month=2, day=1, hour=0, minute=0).days_in_month\n == 29\n )\n\n def test_properties_secondly(self):\n # Test properties on Periods with secondly frequency.\n s_date = Period(\n freq=\"Min\", year=2007, month=1, day=1, hour=0, minute=0, second=0\n )\n #\n assert s_date.year == 2007\n assert s_date.quarter == 1\n assert s_date.month == 1\n assert s_date.day == 1\n assert s_date.weekday == 0\n assert s_date.dayofyear == 1\n assert s_date.hour == 0\n assert s_date.minute == 0\n assert s_date.second == 0\n assert s_date.days_in_month == 31\n assert (\n Period(\n freq=\"Min\", year=2012, month=2, day=1, hour=0, minute=0, second=0\n ).days_in_month\n == 29\n )\n\n\nclass TestPeriodField:\n def test_get_period_field_array_raises_on_out_of_range(self):\n msg = \"Buffer dtype mismatch, expected 
'const int64_t' but got 'double'\"\n with pytest.raises(ValueError, match=msg):\n libperiod.get_period_field_arr(-1, np.empty(1), 0)\n\n\nclass TestPeriodComparisons:\n def test_comparison_same_period_different_object(self):\n # Separate Period objects for the same period\n left = Period(\"2000-01\", \"M\")\n right = Period(\"2000-01\", \"M\")\n\n assert left == right\n assert left >= right\n assert left <= right\n assert not left < right\n assert not left > right\n\n def test_comparison_same_freq(self):\n jan = Period(\"2000-01\", \"M\")\n feb = Period(\"2000-02\", \"M\")\n\n assert not jan == feb\n assert jan != feb\n assert jan < feb\n assert jan <= feb\n assert not jan > feb\n assert not jan >= feb\n\n def test_comparison_mismatched_freq(self):\n jan = Period(\"2000-01\", \"M\")\n day = Period(\"2012-01-01\", \"D\")\n\n msg = r\"Input has different freq=D from Period\\(freq=M\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan == day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan != day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan < day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan <= day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan > day\n with pytest.raises(IncompatibleFrequency, match=msg):\n jan >= day\n\n def test_comparison_invalid_type(self):\n jan = Period(\"2000-01\", \"M\")\n\n assert not jan == 1\n assert jan != 1\n\n int_or_per = \"'(Period|int)'\"\n msg = f\"not supported between instances of {int_or_per} and {int_or_per}\"\n for left, right in [(jan, 1), (1, jan)]:\n\n with pytest.raises(TypeError, match=msg):\n left > right\n with pytest.raises(TypeError, match=msg):\n left >= right\n with pytest.raises(TypeError, match=msg):\n left < right\n with pytest.raises(TypeError, match=msg):\n left <= right\n\n def test_sort_periods(self):\n jan = Period(\"2000-01\", \"M\")\n feb = Period(\"2000-02\", \"M\")\n mar = Period(\"2000-03\", \"M\")\n periods = [mar, jan, feb]\n correctPeriods = [jan, feb, mar]\n assert sorted(periods) == correctPeriods\n\n def test_period_cmp_nat(self):\n p = Period(\"2011-01-01\", freq=\"D\")\n\n t = Timestamp(\"2011-01-01\")\n # confirm Period('NaT') work identical with Timestamp('NaT')\n for left, right in [\n (NaT, p),\n (p, NaT),\n (NaT, t),\n (t, NaT),\n ]:\n assert not left < right\n assert not left > right\n assert not left == right\n assert left != right\n assert not left <= right\n assert not left >= right\n\n\nclass TestArithmetic:\n def test_sub_delta(self):\n left, right = Period(\"2011\", freq=\"A\"), Period(\"2007\", freq=\"A\")\n result = left - right\n assert result == 4 * right.freq\n\n msg = r\"Input has different freq=M from Period\\(freq=A-DEC\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n left - Period(\"2007-01\", freq=\"M\")\n\n def test_add_integer(self):\n per1 = Period(freq=\"D\", year=2008, month=1, day=1)\n per2 = Period(freq=\"D\", year=2008, month=1, day=2)\n assert per1 + 1 == per2\n assert 1 + per1 == per2\n\n def test_add_sub_nat(self):\n # GH#13071\n p = Period(\"2011-01\", freq=\"M\")\n assert p + NaT is NaT\n assert NaT + p is NaT\n assert p - NaT is NaT\n assert NaT - p is NaT\n\n def test_add_invalid(self):\n # GH#4731\n per1 = Period(freq=\"D\", year=2008, month=1, day=1)\n per2 = Period(freq=\"D\", year=2008, month=1, day=2)\n\n msg = \"|\".join(\n [\n r\"unsupported operand type\\(s\\)\",\n \"can only concatenate str\",\n \"must be str, not Period\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n per1 + 
\"str\"\n with pytest.raises(TypeError, match=msg):\n \"str\" + per1\n with pytest.raises(TypeError, match=msg):\n per1 + per2\n\n boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]\n ids = [\"identity\", \"Series\", \"Index\"]\n\n @pytest.mark.parametrize(\"lbox\", boxes, ids=ids)\n @pytest.mark.parametrize(\"rbox\", boxes, ids=ids)\n def test_add_timestamp_raises(self, rbox, lbox):\n # GH#17983\n ts = Timestamp(\"2017\")\n per = Period(\"2017\", freq=\"M\")\n\n # We may get a different message depending on which class raises\n # the error.\n msg = \"|\".join(\n [\n \"cannot add\",\n \"unsupported operand\",\n \"can only operate on a\",\n \"incompatible type\",\n \"ufunc add cannot use operands\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n lbox(ts) + rbox(per)\n\n with pytest.raises(TypeError, match=msg):\n lbox(per) + rbox(ts)\n\n with pytest.raises(TypeError, match=msg):\n lbox(per) + rbox(per)\n\n def test_sub(self):\n per1 = Period(\"2011-01-01\", freq=\"D\")\n per2 = Period(\"2011-01-15\", freq=\"D\")\n\n off = per1.freq\n assert per1 - per2 == -14 * off\n assert per2 - per1 == 14 * off\n\n msg = r\"Input has different freq=M from Period\\(freq=D\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n per1 - Period(\"2011-02\", freq=\"M\")\n\n @pytest.mark.parametrize(\"n\", [1, 2, 3, 4])\n def test_sub_n_gt_1_ticks(self, tick_classes, n):\n # GH 23878\n p1 = Period(\"19910905\", freq=tick_classes(n))\n p2 = Period(\"19920406\", freq=tick_classes(n))\n\n expected = Period(str(p2), freq=p2.freq.base) - Period(\n str(p1), freq=p1.freq.base\n )\n\n assert (p2 - p1) == expected\n\n @pytest.mark.parametrize(\"normalize\", [True, False])\n @pytest.mark.parametrize(\"n\", [1, 2, 3, 4])\n @pytest.mark.parametrize(\n \"offset, kwd_name\",\n [\n (offsets.YearEnd, \"month\"),\n (offsets.QuarterEnd, \"startingMonth\"),\n (offsets.MonthEnd, None),\n (offsets.Week, \"weekday\"),\n ],\n )\n def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):\n # GH 23878\n kwds = {kwd_name: 3} if kwd_name is not None else {}\n p1_d = \"19910905\"\n p2_d = \"19920406\"\n p1 = Period(p1_d, freq=offset(n, normalize, **kwds))\n p2 = Period(p2_d, freq=offset(n, normalize, **kwds))\n\n expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)\n\n assert (p2 - p1) == expected\n\n def test_add_offset(self):\n # freq is DateOffset\n for freq in [\"A\", \"2A\", \"3A\"]:\n p = Period(\"2011\", freq=freq)\n exp = Period(\"2013\", freq=freq)\n assert p + offsets.YearEnd(2) == exp\n assert offsets.YearEnd(2) + p == exp\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(365, \"D\"),\n timedelta(365),\n ]:\n msg = \"Input has different freq|Input cannot be converted to Period\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + o\n\n if isinstance(o, np.timedelta64):\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n o + p\n else:\n msg = \"|\".join(\n [\n \"Input has different freq\",\n \"Input cannot be converted to Period\",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n o + p\n\n for freq in [\"M\", \"2M\", \"3M\"]:\n p = Period(\"2011-03\", freq=freq)\n exp = Period(\"2011-05\", freq=freq)\n assert p + offsets.MonthEnd(2) == exp\n assert offsets.MonthEnd(2) + p == exp\n\n exp = Period(\"2012-03\", freq=freq)\n assert p + offsets.MonthEnd(12) == exp\n assert offsets.MonthEnd(12) + p == exp\n\n for o in [\n offsets.YearBegin(2),\n 
offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(365, \"D\"),\n timedelta(365),\n ]:\n msg = \"Input has different freq|Input cannot be converted to Period\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + o\n\n if isinstance(o, np.timedelta64):\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n o + p\n else:\n msg = \"|\".join(\n [\n \"Input has different freq\",\n \"Input cannot be converted to Period\",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n o + p\n\n # freq is Tick\n for freq in [\"D\", \"2D\", \"3D\"]:\n p = Period(\"2011-04-01\", freq=freq)\n\n exp = Period(\"2011-04-06\", freq=freq)\n assert p + offsets.Day(5) == exp\n assert offsets.Day(5) + p == exp\n\n exp = Period(\"2011-04-02\", freq=freq)\n assert p + offsets.Hour(24) == exp\n assert offsets.Hour(24) + p == exp\n\n exp = Period(\"2011-04-03\", freq=freq)\n assert p + np.timedelta64(2, \"D\") == exp\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n np.timedelta64(2, \"D\") + p\n\n exp = Period(\"2011-04-02\", freq=freq)\n assert p + np.timedelta64(3600 * 24, \"s\") == exp\n with pytest.raises(TypeError, match=msg):\n np.timedelta64(3600 * 24, \"s\") + p\n\n exp = Period(\"2011-03-30\", freq=freq)\n assert p + timedelta(-2) == exp\n assert timedelta(-2) + p == exp\n\n exp = Period(\"2011-04-03\", freq=freq)\n assert p + timedelta(hours=48) == exp\n assert timedelta(hours=48) + p == exp\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(4, \"h\"),\n timedelta(hours=23),\n ]:\n msg = \"Input has different freq|Input cannot be converted to Period\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + o\n\n if isinstance(o, np.timedelta64):\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n o + p\n else:\n msg = \"|\".join(\n [\n \"Input has different freq\",\n \"Input cannot be converted to Period\",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n o + p\n\n for freq in [\"H\", \"2H\", \"3H\"]:\n p = Period(\"2011-04-01 09:00\", freq=freq)\n\n exp = Period(\"2011-04-03 09:00\", freq=freq)\n assert p + offsets.Day(2) == exp\n assert offsets.Day(2) + p == exp\n\n exp = Period(\"2011-04-01 12:00\", freq=freq)\n assert p + offsets.Hour(3) == exp\n assert offsets.Hour(3) + p == exp\n\n msg = \"cannot use operands with types\"\n exp = Period(\"2011-04-01 12:00\", freq=freq)\n assert p + np.timedelta64(3, \"h\") == exp\n with pytest.raises(TypeError, match=msg):\n np.timedelta64(3, \"h\") + p\n\n exp = Period(\"2011-04-01 10:00\", freq=freq)\n assert p + np.timedelta64(3600, \"s\") == exp\n with pytest.raises(TypeError, match=msg):\n np.timedelta64(3600, \"s\") + p\n\n exp = Period(\"2011-04-01 11:00\", freq=freq)\n assert p + timedelta(minutes=120) == exp\n assert timedelta(minutes=120) + p == exp\n\n exp = Period(\"2011-04-05 12:00\", freq=freq)\n assert p + timedelta(days=4, minutes=180) == exp\n assert timedelta(days=4, minutes=180) + p == exp\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(3200, \"s\"),\n timedelta(hours=23, minutes=30),\n ]:\n msg = \"Input has different freq|Input cannot be converted to Period\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + o\n\n if isinstance(o, np.timedelta64):\n msg = \"cannot use operands with types\"\n with pytest.raises(TypeError, match=msg):\n o + p\n else:\n msg = \"|\".join(\n [\n 
\"Input has different freq\",\n \"Input cannot be converted to Period\",\n ]\n )\n with pytest.raises(IncompatibleFrequency, match=msg):\n o + p\n\n def test_sub_offset(self):\n # freq is DateOffset\n msg = \"Input has different freq|Input cannot be converted to Period\"\n for freq in [\"A\", \"2A\", \"3A\"]:\n p = Period(\"2011\", freq=freq)\n assert p - offsets.YearEnd(2) == Period(\"2009\", freq=freq)\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(365, \"D\"),\n timedelta(365),\n ]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - o\n\n for freq in [\"M\", \"2M\", \"3M\"]:\n p = Period(\"2011-03\", freq=freq)\n assert p - offsets.MonthEnd(2) == Period(\"2011-01\", freq=freq)\n assert p - offsets.MonthEnd(12) == Period(\"2010-03\", freq=freq)\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(365, \"D\"),\n timedelta(365),\n ]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - o\n\n # freq is Tick\n for freq in [\"D\", \"2D\", \"3D\"]:\n p = Period(\"2011-04-01\", freq=freq)\n assert p - offsets.Day(5) == Period(\"2011-03-27\", freq=freq)\n assert p - offsets.Hour(24) == Period(\"2011-03-31\", freq=freq)\n assert p - np.timedelta64(2, \"D\") == Period(\"2011-03-30\", freq=freq)\n assert p - np.timedelta64(3600 * 24, \"s\") == Period(\"2011-03-31\", freq=freq)\n assert p - timedelta(-2) == Period(\"2011-04-03\", freq=freq)\n assert p - timedelta(hours=48) == Period(\"2011-03-30\", freq=freq)\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(4, \"h\"),\n timedelta(hours=23),\n ]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - o\n\n for freq in [\"H\", \"2H\", \"3H\"]:\n p = Period(\"2011-04-01 09:00\", freq=freq)\n assert p - offsets.Day(2) == Period(\"2011-03-30 09:00\", freq=freq)\n assert p - offsets.Hour(3) == Period(\"2011-04-01 06:00\", freq=freq)\n assert p - np.timedelta64(3, \"h\") == Period(\"2011-04-01 06:00\", freq=freq)\n assert p - np.timedelta64(3600, \"s\") == Period(\n \"2011-04-01 08:00\", freq=freq\n )\n assert p - timedelta(minutes=120) == Period(\"2011-04-01 07:00\", freq=freq)\n assert p - timedelta(days=4, minutes=180) == Period(\n \"2011-03-28 06:00\", freq=freq\n )\n\n for o in [\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.Minute(),\n np.timedelta64(3200, \"s\"),\n timedelta(hours=23, minutes=30),\n ]:\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - o\n\n @pytest.mark.parametrize(\"freq\", [\"M\", \"2M\", \"3M\"])\n def test_period_addsub_nat(self, freq):\n per = Period(\"2011-01\", freq=freq)\n\n # For subtraction, NaT is treated as another Period object\n assert NaT - per is NaT\n assert per - NaT is NaT\n\n # For addition, NaT is treated as offset-like\n assert NaT + per is NaT\n assert per + NaT is NaT\n\n def test_period_ops_offset(self):\n p = Period(\"2011-04-01\", freq=\"D\")\n result = p + offsets.Day()\n exp = Period(\"2011-04-02\", freq=\"D\")\n assert result == exp\n\n result = p - offsets.Day(2)\n exp = Period(\"2011-03-30\", freq=\"D\")\n assert result == exp\n\n msg = r\"Input cannot be converted to Period\\(freq=D\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n p + offsets.Hour(2)\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n p - offsets.Hour(2)\n\n\ndef test_period_immutable():\n # see gh-17116\n msg = \"not writable\"\n\n per = Period(\"2014Q1\")\n with pytest.raises(AttributeError, match=msg):\n 
per.ordinal = 14\n\n freq = per.freq\n with pytest.raises(AttributeError, match=msg):\n per.freq = 2 * freq\n\n\ndef test_small_year_parsing():\n per1 = Period(\"0001-01-07\", \"D\")\n assert per1.year == 1\n assert per1.day == 7\n\n\ndef test_negone_ordinals():\n freqs = [\"A\", \"M\", \"Q\", \"D\", \"H\", \"T\", \"S\"]\n\n period = Period(ordinal=-1, freq=\"D\")\n for freq in freqs:\n repr(period.asfreq(freq))\n\n for freq in freqs:\n period = Period(ordinal=-1, freq=freq)\n repr(period)\n assert period.year == 1969\n\n period = Period(ordinal=-1, freq=\"B\")\n repr(period)\n period = Period(ordinal=-1, freq=\"W\")\n repr(period)\n", "from typing import List\n\nimport numpy as np\n\nfrom pandas._typing import FilePathOrBuffer, Scalar\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.io.excel._base import ExcelWriter, _BaseExcelReader\nfrom pandas.io.excel._util import _validate_freeze_panes\n\n\nclass _OpenpyxlWriter(ExcelWriter):\n engine = \"openpyxl\"\n supported_extensions = (\".xlsx\", \".xlsm\")\n\n def __init__(self, path, engine=None, mode=\"w\", **engine_kwargs):\n # Use the openpyxl module as the Excel writer.\n from openpyxl.workbook import Workbook\n\n super().__init__(path, mode=mode, **engine_kwargs)\n\n if self.mode == \"a\": # Load from existing workbook\n from openpyxl import load_workbook\n\n book = load_workbook(self.path)\n self.book = book\n else:\n # Create workbook object with default optimized_write=True.\n self.book = Workbook()\n\n if self.book.worksheets:\n try:\n self.book.remove(self.book.worksheets[0])\n except AttributeError:\n\n # compat - for openpyxl <= 2.4\n self.book.remove_sheet(self.book.worksheets[0])\n\n def save(self):\n \"\"\"\n Save workbook to disk.\n \"\"\"\n return self.book.save(self.path)\n\n @classmethod\n def _convert_to_style(cls, style_dict):\n \"\"\"\n Converts a style_dict to an openpyxl style object.\n\n Parameters\n ----------\n style_dict : style dictionary to convert\n \"\"\"\n from openpyxl.style import Style\n\n xls_style = Style()\n for key, value in style_dict.items():\n for nk, nv in value.items():\n if key == \"borders\":\n (\n xls_style.borders.__getattribute__(nk).__setattr__(\n \"border_style\", nv\n )\n )\n else:\n xls_style.__getattribute__(key).__setattr__(nk, nv)\n\n return xls_style\n\n @classmethod\n def _convert_to_style_kwargs(cls, style_dict):\n \"\"\"\n Convert a style_dict to a set of kwargs suitable for initializing\n or updating-on-copy an openpyxl v2 style object.\n\n Parameters\n ----------\n style_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'font'\n 'fill'\n 'border' ('borders')\n 'alignment'\n 'number_format'\n 'protection'\n\n Returns\n -------\n style_kwargs : dict\n A dict with the same, normalized keys as ``style_dict`` but each\n value has been replaced with a native openpyxl style object of the\n appropriate class.\n \"\"\"\n _style_key_map = {\"borders\": \"border\"}\n\n style_kwargs = {}\n for k, v in style_dict.items():\n if k in _style_key_map:\n k = _style_key_map[k]\n _conv_to_x = getattr(cls, f\"_convert_to_{k}\", lambda x: None)\n new_v = _conv_to_x(v)\n if new_v:\n style_kwargs[k] = new_v\n\n return style_kwargs\n\n @classmethod\n def _convert_to_color(cls, color_spec):\n \"\"\"\n Convert ``color_spec`` to an openpyxl v2 Color object.\n\n Parameters\n ----------\n color_spec : str, dict\n A 32-bit ARGB hex string, or a dict with zero or more of the\n following keys.\n 'rgb'\n 'indexed'\n 'auto'\n 'theme'\n 'tint'\n 
'index'\n 'type'\n\n Returns\n -------\n color : openpyxl.styles.Color\n \"\"\"\n from openpyxl.styles import Color\n\n if isinstance(color_spec, str):\n return Color(color_spec)\n else:\n return Color(**color_spec)\n\n @classmethod\n def _convert_to_font(cls, font_dict):\n \"\"\"\n Convert ``font_dict`` to an openpyxl v2 Font object.\n\n Parameters\n ----------\n font_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'name'\n 'size' ('sz')\n 'bold' ('b')\n 'italic' ('i')\n 'underline' ('u')\n 'strikethrough' ('strike')\n 'color'\n 'vertAlign' ('vertalign')\n 'charset'\n 'scheme'\n 'family'\n 'outline'\n 'shadow'\n 'condense'\n\n Returns\n -------\n font : openpyxl.styles.Font\n \"\"\"\n from openpyxl.styles import Font\n\n _font_key_map = {\n \"sz\": \"size\",\n \"b\": \"bold\",\n \"i\": \"italic\",\n \"u\": \"underline\",\n \"strike\": \"strikethrough\",\n \"vertalign\": \"vertAlign\",\n }\n\n font_kwargs = {}\n for k, v in font_dict.items():\n if k in _font_key_map:\n k = _font_key_map[k]\n if k == \"color\":\n v = cls._convert_to_color(v)\n font_kwargs[k] = v\n\n return Font(**font_kwargs)\n\n @classmethod\n def _convert_to_stop(cls, stop_seq):\n \"\"\"\n Convert ``stop_seq`` to a list of openpyxl v2 Color objects,\n suitable for initializing the ``GradientFill`` ``stop`` parameter.\n\n Parameters\n ----------\n stop_seq : iterable\n An iterable that yields objects suitable for consumption by\n ``_convert_to_color``.\n\n Returns\n -------\n stop : list of openpyxl.styles.Color\n \"\"\"\n return map(cls._convert_to_color, stop_seq)\n\n @classmethod\n def _convert_to_fill(cls, fill_dict):\n \"\"\"\n Convert ``fill_dict`` to an openpyxl v2 Fill object.\n\n Parameters\n ----------\n fill_dict : dict\n A dict with one or more of the following keys (or their synonyms),\n 'fill_type' ('patternType', 'patterntype')\n 'start_color' ('fgColor', 'fgcolor')\n 'end_color' ('bgColor', 'bgcolor')\n or one or more of the following keys (or their synonyms).\n 'type' ('fill_type')\n 'degree'\n 'left'\n 'right'\n 'top'\n 'bottom'\n 'stop'\n\n Returns\n -------\n fill : openpyxl.styles.Fill\n \"\"\"\n from openpyxl.styles import GradientFill, PatternFill\n\n _pattern_fill_key_map = {\n \"patternType\": \"fill_type\",\n \"patterntype\": \"fill_type\",\n \"fgColor\": \"start_color\",\n \"fgcolor\": \"start_color\",\n \"bgColor\": \"end_color\",\n \"bgcolor\": \"end_color\",\n }\n\n _gradient_fill_key_map = {\"fill_type\": \"type\"}\n\n pfill_kwargs = {}\n gfill_kwargs = {}\n for k, v in fill_dict.items():\n pk = gk = None\n if k in _pattern_fill_key_map:\n pk = _pattern_fill_key_map[k]\n if k in _gradient_fill_key_map:\n gk = _gradient_fill_key_map[k]\n if pk in [\"start_color\", \"end_color\"]:\n v = cls._convert_to_color(v)\n if gk == \"stop\":\n v = cls._convert_to_stop(v)\n if pk:\n pfill_kwargs[pk] = v\n elif gk:\n gfill_kwargs[gk] = v\n else:\n pfill_kwargs[k] = v\n gfill_kwargs[k] = v\n\n try:\n return PatternFill(**pfill_kwargs)\n except TypeError:\n return GradientFill(**gfill_kwargs)\n\n @classmethod\n def _convert_to_side(cls, side_spec):\n \"\"\"\n Convert ``side_spec`` to an openpyxl v2 Side object.\n\n Parameters\n ----------\n side_spec : str, dict\n A string specifying the border style, or a dict with zero or more\n of the following keys (or their synonyms).\n 'style' ('border_style')\n 'color'\n\n Returns\n -------\n side : openpyxl.styles.Side\n \"\"\"\n from openpyxl.styles import Side\n\n _side_key_map = {\"border_style\": \"style\"}\n\n if 
isinstance(side_spec, str):\n return Side(style=side_spec)\n\n side_kwargs = {}\n for k, v in side_spec.items():\n if k in _side_key_map:\n k = _side_key_map[k]\n if k == \"color\":\n v = cls._convert_to_color(v)\n side_kwargs[k] = v\n\n return Side(**side_kwargs)\n\n @classmethod\n def _convert_to_border(cls, border_dict):\n \"\"\"\n Convert ``border_dict`` to an openpyxl v2 Border object.\n\n Parameters\n ----------\n border_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'left'\n 'right'\n 'top'\n 'bottom'\n 'diagonal'\n 'diagonal_direction'\n 'vertical'\n 'horizontal'\n 'diagonalUp' ('diagonalup')\n 'diagonalDown' ('diagonaldown')\n 'outline'\n\n Returns\n -------\n border : openpyxl.styles.Border\n \"\"\"\n from openpyxl.styles import Border\n\n _border_key_map = {\"diagonalup\": \"diagonalUp\", \"diagonaldown\": \"diagonalDown\"}\n\n border_kwargs = {}\n for k, v in border_dict.items():\n if k in _border_key_map:\n k = _border_key_map[k]\n if k == \"color\":\n v = cls._convert_to_color(v)\n if k in [\"left\", \"right\", \"top\", \"bottom\", \"diagonal\"]:\n v = cls._convert_to_side(v)\n border_kwargs[k] = v\n\n return Border(**border_kwargs)\n\n @classmethod\n def _convert_to_alignment(cls, alignment_dict):\n \"\"\"\n Convert ``alignment_dict`` to an openpyxl v2 Alignment object.\n\n Parameters\n ----------\n alignment_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'horizontal'\n 'vertical'\n 'text_rotation'\n 'wrap_text'\n 'shrink_to_fit'\n 'indent'\n Returns\n -------\n alignment : openpyxl.styles.Alignment\n \"\"\"\n from openpyxl.styles import Alignment\n\n return Alignment(**alignment_dict)\n\n @classmethod\n def _convert_to_number_format(cls, number_format_dict):\n \"\"\"\n Convert ``number_format_dict`` to an openpyxl v2.1.0 number format\n initializer.\n\n Parameters\n ----------\n number_format_dict : dict\n A dict with zero or more of the following keys.\n 'format_code' : str\n\n Returns\n -------\n number_format : str\n \"\"\"\n return number_format_dict[\"format_code\"]\n\n @classmethod\n def _convert_to_protection(cls, protection_dict):\n \"\"\"\n Convert ``protection_dict`` to an openpyxl v2 Protection object.\n\n Parameters\n ----------\n protection_dict : dict\n A dict with zero or more of the following keys.\n 'locked'\n 'hidden'\n\n Returns\n -------\n \"\"\"\n from openpyxl.styles import Protection\n\n return Protection(**protection_dict)\n\n def write_cells(\n self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None\n ):\n # Write the frame cells using openpyxl.\n sheet_name = self._get_sheet_name(sheet_name)\n\n _style_cache = {}\n\n if sheet_name in self.sheets:\n wks = self.sheets[sheet_name]\n else:\n wks = self.book.create_sheet()\n wks.title = sheet_name\n self.sheets[sheet_name] = wks\n\n if _validate_freeze_panes(freeze_panes):\n wks.freeze_panes = wks.cell(\n row=freeze_panes[0] + 1, column=freeze_panes[1] + 1\n )\n\n for cell in cells:\n xcell = wks.cell(\n row=startrow + cell.row + 1, column=startcol + cell.col + 1\n )\n xcell.value, fmt = self._value_with_fmt(cell.val)\n if fmt:\n xcell.number_format = fmt\n\n style_kwargs = {}\n if cell.style:\n key = str(cell.style)\n style_kwargs = _style_cache.get(key)\n if style_kwargs is None:\n style_kwargs = self._convert_to_style_kwargs(cell.style)\n _style_cache[key] = style_kwargs\n\n if style_kwargs:\n for k, v in style_kwargs.items():\n setattr(xcell, k, v)\n\n if cell.mergestart is not None and cell.mergeend is not 
None:\n\n wks.merge_cells(\n start_row=startrow + cell.row + 1,\n start_column=startcol + cell.col + 1,\n end_column=startcol + cell.mergeend + 1,\n end_row=startrow + cell.mergestart + 1,\n )\n\n # When cells are merged only the top-left cell is preserved\n # The behaviour of the other cells in a merged range is\n # undefined\n if style_kwargs:\n first_row = startrow + cell.row + 1\n last_row = startrow + cell.mergestart + 1\n first_col = startcol + cell.col + 1\n last_col = startcol + cell.mergeend + 1\n\n for row in range(first_row, last_row + 1):\n for col in range(first_col, last_col + 1):\n if row == first_row and col == first_col:\n # Ignore first cell. It is already handled.\n continue\n xcell = wks.cell(column=col, row=row)\n for k, v in style_kwargs.items():\n setattr(xcell, k, v)\n\n\nclass _OpenpyxlReader(_BaseExcelReader):\n def __init__(self, filepath_or_buffer: FilePathOrBuffer) -> None:\n \"\"\"\n Reader using openpyxl engine.\n\n Parameters\n ----------\n filepath_or_buffer : string, path object or Workbook\n Object to be parsed.\n \"\"\"\n import_optional_dependency(\"openpyxl\")\n super().__init__(filepath_or_buffer)\n\n @property\n def _workbook_class(self):\n from openpyxl import Workbook\n\n return Workbook\n\n def load_workbook(self, filepath_or_buffer: FilePathOrBuffer):\n from openpyxl import load_workbook\n\n return load_workbook(\n filepath_or_buffer, read_only=True, data_only=True, keep_links=False\n )\n\n def close(self):\n # https://stackoverflow.com/questions/31416842/\n # openpyxl-does-not-close-excel-workbook-in-read-only-mode\n self.book.close()\n\n @property\n def sheet_names(self) -> List[str]:\n return self.book.sheetnames\n\n def get_sheet_by_name(self, name: str):\n return self.book[name]\n\n def get_sheet_by_index(self, index: int):\n return self.book.worksheets[index]\n\n def _convert_cell(self, cell, convert_float: bool) -> Scalar:\n\n # TODO: replace with openpyxl constants\n if cell.is_date:\n return cell.value\n elif cell.data_type == \"e\":\n return np.nan\n elif cell.data_type == \"b\":\n return bool(cell.value)\n elif cell.value is None:\n return \"\" # compat with xlrd\n elif cell.data_type == \"n\":\n # GH5394\n if convert_float:\n val = int(cell.value)\n if val == cell.value:\n return val\n else:\n return float(cell.value)\n\n return cell.value\n\n def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]:\n data: List[List[Scalar]] = []\n for row in sheet.rows:\n data.append([self._convert_cell(cell, convert_float) for cell in row])\n\n return data\n" ]
[ [ "pandas.isna", "pandas._testing.assert_produces_warning", "pandas.array", "pandas.date_range", "numpy.random.randn", "pandas.timedelta_range", "pandas.Timestamp.now", "pandas.Timestamp", "pandas.Categorical", "numpy.arange", "pandas.Series", "pandas._testing.assert_series_equal", "pandas._testing.makeDateIndex" ], [ "matplotlib.use" ], [ "pandas.offsets.YearEnd", "pandas.offsets.Minute", "pandas.offsets.Day", "pandas.Timestamp", "pandas.Period.now", "numpy.empty", "pandas.Timedelta", "pandas._testing.round_trip_pickle", "pandas.offsets.BusinessDay", "pandas.offsets.BDay", "pandas.offsets.YearBegin", "pandas.Period", "pandas.Period._from_ordinal", "pandas.offsets.QuarterEnd", "numpy.timedelta64", "numpy.datetime64", "pandas.offsets.MonthEnd", "pandas._libs.tslibs.timezones.maybe_get_tz", "pandas.Index", "pandas._testing.assert_produces_warning", "pandas.offsets.MonthBegin", "pandas.compat.numpy.np_datetime64_compat", "pandas._libs.tslibs.timezones.dateutil_gettz", "pandas.offsets.Hour", "pandas.Series" ], [ "pandas.compat._optional.import_optional_dependency", "pandas.io.excel._util._validate_freeze_panes" ] ]
octaviomtz/inpaint_melanoma
[ "19cf85a0d51f04ad3e1e3ef68ddf1cc5e27a0b84" ]
[ "inpaint_melanoma/core.py" ]
[ "# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).\n\n__all__ = ['rgb2gray', 'otsu_thresh_4largest_component', 'component_closest_center', 'get_center', 'denormalizePatches',\n 'figs_horizontal2', 'figs_comparison', 'figs_horizontal3', 'plot_inpaints_pairs', 'channels_first_last',\n 'plot_distributions', 'get_saved_images', 'get_sample_distributions_per_channel']\n\n# Cell\nimport argparse\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport torch\nimport torch.optim\nfrom torch import nn\nfrom copy import copy, deepcopy\nimport time\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport pandas as pd\nfrom skimage import measure, morphology\nfrom itertools import groupby, count\nimport matplotlib.patches as patches\nfrom skimage.morphology import watershed\nfrom skimage.feature import peak_local_max\nfrom torch.autograd import Variable\nfrom scipy.spatial import distance\nimport sys\nfrom PIL import Image\nfrom matplotlib.gridspec import GridSpec\nimport random\n\n# Cell\n# from models.skip import skip\nfrom .skip import *\nfrom .inpainting_utils import *\nfrom .common_utils import *\nfrom .inpainting_nodules_functions import *\n\n# Cell\nimport warnings\nfrom torch.autograd import Variable\n# from google.colab import drive\nfrom scipy import ndimage\nfrom skimage import filters\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n# Cell\ndef rgb2gray(rgb):\n '''https://stackoverflow.com/questions/12201577/how-can-i-convert-an-rgb-image-into-grayscale-in-python'''\n return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])\n\ndef otsu_thresh_4largest_component(img2):\n val = filters.threshold_otsu(img2)\n mask_otsu_orig = img2<val\n mask_otsu = ndimage.morphology.binary_erosion(mask_otsu_orig, iterations=20)\n mask_otsu = ndimage.morphology.binary_dilation(mask_otsu, iterations=80)\n mask_otsu = ndimage.morphology.binary_fill_holes(mask_otsu)\n labeled_mask, cc_num = ndimage.label(mask_otsu)\n sorted_comp = np.bincount(labeled_mask.flat)\n sorted_comp = np.sort(sorted_comp)[::-1]\n mask_lesions = []\n for i in np.arange(1, np.min([len(sorted_comp), 4])):\n mask_lesions.append((labeled_mask == np.where(np.bincount(labeled_mask.flat) == sorted_comp[i])[0][0]))\n return mask_lesions\n\ndef component_closest_center(img2, masks_lesions):\n y_half, x_half = [i//2 for i in np.shape(img2)]\n y_half_x_half = np.asarray([y_half, x_half])\n ml_closest = masks_lesions[0] # default\n dist_min = 10000\n for i in masks_lesions:\n yy,xx = np.where(i==1)\n ymed_xmed = np.asarray([np.median(yy), np.median(xx)])\n dist_new = distance.cdist(np.expand_dims(y_half_x_half,0), np.expand_dims(ymed_xmed,0))\n if dist_new < dist_min:\n dist_min = dist_new\n ml_closest = i\n return ml_closest\n\ndef get_center(img, part=.25):\n factor = 32\n y_half, x_half, _ = [i//2 for i in np.shape(img)]\n y_include, x_include = np.asarray([y_half, x_half])* part\n y_include = y_include + (factor - y_include % factor)\n x_include = x_include + (factor - x_include % factor)\n y_part1, x_part1 = int(y_half - y_include), int(x_half - x_include)\n y_part2, x_part2 = int(y_half + y_include), int(x_half + x_include)\n y_part1, y_part2, x_part1, x_part2\n return img[y_part1: y_part2, x_part1: x_part2,:], y_part1, x_part1\n\ndef denormalizePatches(img):\n img = img * 255.\n img = img.astype('int16')\n return img\n\n# Cell\ndef figs_horizontal2(ff, names_selected, suffix_available, path_source):\n f1 = [names_selected+j for j in suffix_available if names_selected+j 
in ff]\n f1 = np.unique(f1)\n f1 = np.sort(f1)\n n_img = len(f1)\n fig, ax = plt.subplots(1,n_img,figsize=(24,5))\n for idx,i in enumerate(f1):\n # name_display = '_'.join(names_selected.split('_')[1:])\n name_display = i.split('_ISIC')[0].split('_')[-1]\n a = np.fromfile(f'{path_source}{i}',dtype='int16')\n a = a/255\n a = np.clip(a,0,1)\n a = np.reshape(a, (3,512,512))\n a = np.swapaxes(a,0,1)\n a = np.swapaxes(a,1,2)\n ax[idx].imshow(a)\n ax[idx].text(10,50,name_display)\n for axx in ax.ravel(): axx.axis('off')\n plt.tight_layout()\n print(names_selected)\n return f1\n\n# Cell\ndef figs_comparison(ff, names_selected, suffix_available, gen_idx, folder='/mnt/90cf2a10-3cf8-48a6-9234-9973231cadc6/Kaggle/melanoma/datasets_preprocessed/size_512/'):\n f1 = [names_selected+j for j in suffix_available if names_selected+j in ff]\n f1 = np.unique(f1)\n f1 = np.sort(f1)\n n_img = len(f1)\n i = f1[gen_idx]\n\n key = 'ISIC'+suffix_available[0].split('.raw')[0].split('ISIC')[-1]\n orig = plt.imread(f'{folder}{key}.jpg')\n mask = np.load(f'{folder}mask_{key}.npz')\n mask = mask.f.arr_0\n\n fig, ax = plt.subplots(1,4,figsize=(12,5))\n name_display = i.split('_ISIC')[0].split('_')[-1]\n inpain = np.fromfile(f'{path_source}{i}',dtype='int16')\n inpain = inpain/255\n inpain = np.clip(inpain,0,1)\n inpain = np.reshape(inpain, (3,512,512))\n inpain = np.swapaxes(inpain,0,1)\n inpain = np.swapaxes(inpain,1,2)\n ax[1].imshow(orig)\n ax[0].imshow(orig)\n ax[0].imshow(mask, alpha=.3)\n ax[2].imshow(inpain)\n ax[3].imshow(inpain)\n ax[3].imshow(mask, alpha=.3)\n for axx in ax.ravel(): axx.axis('off')\n plt.tight_layout()\n return key, inpain\n\n# Cell\ndef figs_horizontal3(ff, names_selected, suffix_available, path_results):\n f1 = [names_selected+j for j in suffix_available if names_selected+j in ff]\n f1 = np.unique(f1)\n f1 = np.sort(f1)\n n_img = len(f1)\n fig, ax = plt.subplots(1,n_img,figsize=(24,5))\n for idx,i in enumerate(f1):\n name_display = i.split('_ISIC')[0].split('_')[-1]\n a = Image.open(f'{path_results}{i}')\n ax[idx].imshow(a)\n ax[idx].text(10,50,name_display)\n for axx in ax.ravel(): axx.axis('off')\n plt.tight_layout()\n print(names_selected)\n return f1\n\n# Cell\ndef plot_inpaints_pairs(mse_error, images_raw, images_combined, epochs_saved, filename, archi, params, path_save=''):\n fontsize = 20\n color1 = \"#3F5D7D\"\n color2 = \"#990F02\"\n color3 = \"#ffe84f\"\n widths = [1,2,2,2,2]\n fig=plt.figure(figsize=(18,8));\n gs=GridSpec(2,5, width_ratios=widths)\n ax1=fig.add_subplot(gs[:,0]) # First row, first column\n ax2=fig.add_subplot(gs[0,1]) # First row, second column\n ax3=fig.add_subplot(gs[0,2]) # First row, third column\n ax4=fig.add_subplot(gs[0,3])\n ax5=fig.add_subplot(gs[0,4])\n ax6=fig.add_subplot(gs[1,1])\n ax7=fig.add_subplot(gs[1,2])\n ax8=fig.add_subplot(gs[1,3])\n ax9=fig.add_subplot(gs[1,4])\n\n count=0\n for i, ax_ in zip(images_raw, [ax2, ax4, ax6, ax8]):\n ax_.imshow(i)\n ax_.text(10, 50, str(epochs_saved[-4+count]*10), fontsize=fontsize)\n count+=1\n for i, ax_ in zip(images_combined, [ax3, ax5, ax7, ax9]): ax_.imshow(i)\n for i in [ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9]: i.axis('off')\n\n name = f'{archi}\\n{params}'\n name = name.replace('_LR','\\nLR')\n ax9.text(10,140,name, fontsize=fontsize)\n ax1.semilogy(mse_error, color=color1)\n epochs_saved = np.asarray(epochs_saved)*10\n ax1.semilogy(np.asarray(epochs_saved)[-4:],np.asarray(mse_error)[np.asarray(epochs_saved)][-4:], marker='.', linestyle='None', markersize=20, color=color1)\n fig.tight_layout()\n\n if 
len(path_save)>0:\n fig.savefig(f'{path_save}ov_{filename}_{name}.png' )\n plt.close()\n\n# Cell\ndef channels_first_last(img, output='first'):\n '''just change an img to channels last or to channels first'''\n if output=='last':\n img = np.swapaxes(img,0,1)\n img = np.swapaxes(img,1,2)\n else:\n img = np.swapaxes(img,1,2)\n img = np.swapaxes(img,0,1)\n return img\n\n# Cell\ndef plot_distributions(img, images, mask_var):\n a = channels_first_last(img,'last')\n inp = images[-1]\n mask_ = mask_var.detach().cpu().numpy()\n mask_ = channels_first_last(mask_,'last')\n _mask_ = -mask_+1\n fig ,ax = plt.subplots(2,5, figsize=(18,7))\n ax[0,0].imshow(a)\n ax[1,0].hist(a.flatten()); ax[1,0].set_xlim([0,1]);\n ax[0,1].imshow(inp)\n ax[1,1].hist(inp.flatten()); ax[1,0].set_xlim([0,1]);\n ax[0,2].imshow(a*mask_)\n ax[1,2].hist(((a*mask_)[np.where(a*mask_>0)]).flatten()); ax[1,2].set_xlim([0,1]);\n ax[0,3].imshow(a*_mask_)\n ax[1,3].hist(((a*_mask_)[np.where(a*_mask_>0)]).flatten()); ax[1,3].set_xlim([0,1]);\n ax[0,4].imshow(inp*_mask_)\n ax[1,4].hist(((inp*_mask_)[np.where(a*_mask_>0)]).flatten()); ax[1,4].set_xlim([0,1]);\n for i in [ax[0,0], ax[0,1], ax[0,2], ax[0,3], ax[0,4]]: i.axis('off')\n for i in ax.ravel(): i.ticklabel_format(style='sci', scilimits=(0,0))\n\n# Cell\ndef get_saved_images(path_img_dest, filename, name):\n ii = np.load(f'{path_img_dest}mse/{filename}_{name}.npy')\n tmp = os.listdir(f'{path_img_dest}final/')\n tmp = [i for i in tmp if f'{filename}_{name}' in i]\n final_names = np.sort(tmp)\n tmp = os.listdir(f'{path_img_dest}final/')\n tmp = [i for i in tmp if f'{filename}_{name}' in i]\n raw_names = np.sort(tmp)\n final = [np.asarray(Image.open(f'{path_img_dest}final/{i}'))/255 for i in final_names]\n raw = [np.asarray(Image.open(f'{path_img_dest}raw/{i}'))/255 for i in raw_names]\n epochs = [int(i.split('_')[-1].split('.')[0]) for i in final_names]\n return ii, final, raw, epochs\n\n# Cell\ndef get_sample_distributions_per_channel(img, mask_var, len_lesion):\n '''For each channel, get a sample distribution of the outside skin\n of the same size of the lesion'''\n skin_only_ch0 = img[0][np.where((img[0] * mask_var[0])>0)]\n skin_only_ch1 = img[1][np.where((img[1] * mask_var[0])>0)]\n skin_only_ch2 = img[2][np.where((img[2] * mask_var[0])>0)]\n skin_sample_ch0 = random.sample(list(skin_only_ch0), len_lesion)\n skin_sample_ch1 = random.sample(list(skin_only_ch1), len_lesion)\n skin_sample_ch2 = random.sample(list(skin_only_ch2), len_lesion)\n return skin_sample_ch0, skin_sample_ch1, skin_sample_ch2" ]
[ [ "numpy.dot", "scipy.ndimage.morphology.binary_dilation", "numpy.median", "numpy.load", "numpy.where", "numpy.sort", "numpy.unique", "numpy.bincount", "matplotlib.pyplot.subplots", "numpy.swapaxes", "matplotlib.pyplot.tight_layout", "numpy.expand_dims", "numpy.reshape", "matplotlib.pyplot.close", "numpy.shape", "matplotlib.pyplot.figure", "numpy.fromfile", "numpy.clip", "scipy.ndimage.morphology.binary_fill_holes", "scipy.ndimage.morphology.binary_erosion", "matplotlib.gridspec.GridSpec", "numpy.asarray", "scipy.ndimage.label", "matplotlib.pyplot.imread" ] ]
maxibor/coproID
[ "7dc3362267bc89ce658651d47534455e01dc152b" ]
[ "bin/merge_bp_sp.py" ]
[ "#!/usr/bin/env python3\n\n\nimport argparse\nimport pandas as pd\nimport sys\n\n\ndef get_args():\n '''This function parses and return arguments passed in'''\n parser = argparse.ArgumentParser(\n prog='normalizedReadCount',\n description='Counts reads aligned to genome, and normalize by genome size')\n parser.add_argument(\n '-c',\n dest='countfile',\n default=None,\n help=\"normalized read count csv file\")\n parser.add_argument(\n '-s',\n dest='sourcepredict',\n default=None,\n help=\"sourcepredict csv file\")\n parser.add_argument(\n '-o',\n dest='output',\n default=None,\n help=\"output csv file\")\n\n args = parser.parse_args()\n cf = args.countfile\n sp = args.sourcepredict\n out = args.output\n\n return(cf, sp, out)\n\n\ndef indicator(x):\n if x > 0.5:\n return(0)\n return(1)\n\n\ndef check_learning(orga, col_list):\n if orga not in col_list:\n print(f\"{orga} not in machine learning dataset\")\n sys.exit(1)\n\n\ndef compute_coproba(indic, nrr, sp):\n return(indic*nrr*sp)\n\n\nif __name__ == \"__main__\":\n CF, SP, OUTPUT = get_args()\n\n dcf = pd.read_csv(CF, index_col=0)\n print(dcf.shape)\n orga1 = dcf['Organism_name1'][0]\n orga2 = dcf['Organism_name2'][0]\n try:\n orga3 = dcf['Organism_name3'][0]\n except:\n orga3 = None\n\n dsp = pd.read_csv(SP, index_col=0).T\n\n if orga3:\n check_learning(orga1, dsp.columns)\n check_learning(orga2, dsp.columns)\n check_learning(orga3, dsp.columns)\n else:\n check_learning(orga1, dsp.columns)\n check_learning(orga2, dsp.columns)\n\n d = dcf.merge(dsp, left_index=True, right_index=True)\n\n coproba_list_orga1 = [compute_coproba(\n indic=indicator(a), nrr=b, sp=c) for a, b, c in zip(list(d['unknown']), list(d['NormalizedReadRatio_1']), list(d[orga1]))]\n coproba_list_orga2 = [compute_coproba(\n indic=indicator(a), nrr=b, sp=c) for a, b, c in zip(list(d['unknown']), list(d['NormalizedReadRatio_2']), list(d[orga2]))]\n if orga3:\n coproba_list_orga3 = [compute_coproba(indic=indicator(a), nrr=b, sp=c) for a, b, c in zip(\n list(d['unknown']), list(d['NormalizedReadRatio_3']), list(d[orga3]))]\n\n d2 = pd.DataFrame()\n d2[f\"normalized_bp_proportion_aligned_{orga1}\"] = d['NormalizedReadRatio_1']\n d2[f\"normalized_bp_proportion_aligned_{orga2}\"] = d['NormalizedReadRatio_2']\n if orga3:\n d2[f\"normalized_bp_aligned_{orga3}\"] = d['NormalizedReadRatio_3']\n d2[f\"metagenomic_proportion_{orga1}\"] = d[orga1]\n d2[f\"metagenomic_proportion_{orga2}\"] = d[orga2]\n if orga3:\n d2[f\"metagenomic_proportion_{orga3}\"] = d[orga3]\n d2[f\"coproID_proba_{orga1}\"] = coproba_list_orga1\n d2[f\"coproID_proba_{orga2}\"] = coproba_list_orga2\n if orga3:\n d2[f\"coproID_proba_{orga3}\"] = coproba_list_orga3\n d2.index = d.index\n d2.to_csv(OUTPUT)\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
pashatab1/tablab_fish
[ "4e49c19ca9eb94f059fa1c15231401ffc4405195" ]
[ "common/find_pixel_size.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Determine pixel/inch ratio from linescan across ruler\n\nInputs:\nfilename - full path to csv file containing Position and Intensity Value\n\nAssumes:\nImage is taken of inch side of ruler, and smallest ticks are 1/8 inch increment\n\n@author: tabatabai\n\"\"\"\n\nimport numpy as np\nfrom math import nan\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Read in data\n# filename = '/Users/tabatabai/Desktop/linescan_closestToWindow.csv'\n# filename = '/Volumes/storage/pasha/2020/201210/linescan_rulerOnAcrylicSheetOnFiberBoardOnRecycleBin.csv'\n\n\ndef main(filepath,unit,unit_str):\n \"\"\"Calculates the pixel size for a given image based on a ruler\n inputs: filepath- full path to file containing ruler information\n (ex: '/Volumes/storage/pasha/2021/210226' )\n unit - number corresponding to repeating distance\n (ex: 0.125 for 1/8 ticks)\n unit_str - string of actual unit value corresponding to unit\n (ex: 'inch')\n \n outputs: saves file 'pixel_conversion.txt' in filepath\n saved file contains conversion information\n \n Example Execution\n main('/Volumes/storage/pasha/2021/210226',.125,'inch')\n \n \"\"\"\n fullfile = filepath + '/ruler_linescan.csv' #expects this naming for filename\n df = pd.read_csv(fullfile) \n \n # Plot raw data\n ax1 = plt.subplot(1,2,1)\n plt.plot(df['Distance_(pixels)'], df.Gray_Value) \n ax1.set_xlim(200,300)\n #ax1.set_ylim(60,90)\n ax1.set_ylabel('Pixel Gray Value')\n ax1.set_xlabel('Pixel Position')\n \n # Calculate FFT of Intensities\n y = np.fft.fft(df.Gray_Value.to_numpy()) #calculates fast fourier transform\n y[0] = nan #First position due to data shift (not about 0)\n yystar = y*np.conj(y) # Multiply by complex conjugate - now everything real\n \n # Generate frequencies corresponding to FFT\n xf = np.linspace(0,.5,int(np.floor(len(df.Gray_Value)/2))) # frequencies used in fft\n \n # Plot Power Spectrum\n ax2 = plt.subplot(1,2,2)\n plt.plot(xf,yystar[0:int(np.floor(len(df.Gray_Value)/2))])\n ax2.set_xlim(0, .25)\n ax2.set_ylabel('Power Spectrum')\n ax2.set_xlabel('Frequency (1/d)')\n # plt.savefig('Linescan.png')\n plt.show()\n \n # Convert from frequency to px/inch\n indx = np.nanargmax(yystar[2:int(np.floor(len(df.Gray_Value)/2))]) # Max of power spectrum occurs at this index\n frequency = xf[indx]\n repeating_distance = 1/frequency\n \n # Creates value for cm_per_pixel depending on the unit you used\n if unit_str == 'inch':\n cm_per_pixel = unit/repeating_distance * 2.54\n elif unit_str == 'cm':\n cm_per_pixel = unit/repeating_distance\n else:\n print('You have a unit that your code wasnt ready for')\n \n print('Max Frequency = ', str(frequency))\n print('Repeating distance = ', str(repeating_distance), ' pixels')\n file1 = open(filepath + '/pixel_conversion.txt',\"w\") \n \n #write required informational file\n L = ['The repeating distance is: ' + str(repeating_distance) + ' pixels\\n',\n 'The repeating unit in my image is: ' + str(unit) + unit_str + '\\n',\n 'Therefore, the pixel conversion is: ' + str(unit/repeating_distance) + ' ' +unit_str +' per pixel\\n',\n 'For trex, the cm_per_pixel parameter is: ' + str(cm_per_pixel) + '\\n'] \n file1.writelines(L) \n file1.close() #to change file access modes \n \n \n# inches_per_pixel = 1/(8*repeating_distance) # this is where 1/8 inch increment comes in\n# pixels_per_inch = 1/inches_per_pixel\n\n# # Print to screen relevant information \n# print('Repeating distance = ', str(repeating_distance))\n# print('Inches per pixel = ', 
str(inches_per_pixel))\n# print('Pixels per inch = ', str(pixels_per_inch))\n\n\n# Example Execution\n# main('/Volumes/storage/pasha/2021/210226',.125,'inch')\n\n" ]
[ [ "matplotlib.pyplot.plot", "numpy.conj", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.subplot" ] ]
GeoffKriston/deep-learning-v2-pytorch
[ "92f7b12e8afeb12753bc990829bfa8307b26ef6c" ]
[ "intro-to-pytorch/fc_model.py" ]
[ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass Network(nn.Module):\n def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):\n ''' Builds a feedforward network with arbitrary hidden layers.\n \n Arguments\n ---------\n input_size: integer, size of the input layer\n output_size: integer, size of the output layer\n hidden_layers: list of integers, the sizes of the hidden layers\n \n '''\n super(Network,self).__init__()\n # Input to a hidden layer\n self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])\n \n # Add a variable number of more hidden layers\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n \n self.output = nn.Linear(hidden_layers[-1], output_size)\n \n self.dropout = nn.Dropout(p=drop_p)\n \n def forward(self, x):\n ''' Forward pass through the network, returns the output logits '''\n \n for each in self.hidden_layers:\n x = F.relu(each(x))\n x = self.dropout(x)\n x = self.output(x)\n \n return F.log_softmax(x, dim=1)\n\n\ndef validation(model, testloader, criterion):\n accuracy = 0\n test_loss = 0\n for images, labels in testloader:\n\n images = images.resize_(images.size()[0], 784)\n\n output = model.forward(images)\n test_loss += criterion(output, labels).item()\n\n ## Calculating the accuracy \n # Model's output is log-softmax, take exponential to get the probabilities\n ps = torch.exp(output)\n # Class with highest probability is our predicted class, compare with true label\n equality = (labels.data == ps.max(1)[1])\n # Accuracy is number of correct predictions divided by all predictions, just take the mean\n accuracy += equality.type_as(torch.FloatTensor()).mean()\n\n return test_loss, accuracy\n\n\ndef train(model, trainloader, testloader, criterion, optimizer, epochs=5, print_every=40):\n \n steps = 0\n running_loss = 0\n for e in range(epochs):\n # Model in training mode, dropout is on\n model.train()\n for images, labels in trainloader:\n steps += 1\n \n # Flatten images into a 784 long vector\n images.resize_(images.size()[0], 784)\n \n optimizer.zero_grad()\n \n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Model in inference mode, dropout is off\n model.eval()\n \n # Turn off gradients for validation, will speed up inference\n with torch.no_grad():\n test_loss, accuracy = validation(model, testloader, criterion)\n \n print(\"Epoch: {}/{}.. \".format(e+1, epochs),\n \"Training Loss: {:.3f}.. \".format(running_loss/print_every),\n \"Test Loss: {:.3f}.. \".format(test_loss/len(testloader)),\n \"Test Accuracy: {:.3f}\".format(accuracy/len(testloader)))\n \n running_loss = 0\n \n # Make sure dropout and grads are on for training\n model.train()\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.FloatTensor", "torch.no_grad", "torch.nn.functional.log_softmax", "torch.exp" ] ]
lauromoraes/promoter_paper
[ "62aea776cb318a13e142f84dd84bb0a29fb0e83f" ]
[ "mymodels/parent_models.py" ]
[ "#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n@ide: PyCharm\n@author: Lauro Ângelo Gonçalves de Moraes\n@contact: [email protected]\n@created: 20/06/2020\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.keras import models\nfrom tensorflow.keras.layers import (\n Input,\n Embedding,\n Conv2D,\n Conv1D,\n MaxPooling1D,\n MaxPooling2D,\n AveragePooling1D,\n AveragePooling2D,\n Flatten,\n Dense,\n)\nfrom tensorflow.keras.optimizers import (Adam, Nadam, )\nfrom kerastuner import HyperModel\n\n\ndef conv_pool_block(input_tensor, n_filters=100, k_size=15, pad='same', p_size=2, p_stride=2, activ='relu'):\n x = input_tensor\n input_dim = tf.keras.backend.shape(x).shape[0]\n block1 = Conv2D(\n filters=n_filters,\n kernel_size=(k_size, input_dim),\n padding=pad,\n activation=activ)(x)\n block1 = MaxPooling2D(\n pool_size=(p_size, 1),\n strides=(p_stride, 1))(block1)\n output_tensor = block1\n return output_tensor\n\n\nclass BaseModel(object):\n def __init__(self, data_list, num_classes):\n self.num_classes = num_classes\n self.input_shapes = list()\n self.input_types = list()\n for d in data_list:\n self.input_shapes.append(d.shape()[1:])\n self.input_types.append(d.get_encode())\n self.num_branches = len(data_list)\n self.inputs = self.setup_input()\n self.inputs_tensors = list()\n self.outputs_tensors = list()\n\n def setup_input(self):\n inputs = list()\n for i, t in enumerate(self.input_types):\n # Setup input for this branch\n input_shape = self.input_shapes[i]\n # print('input_shape', input_shape)\n x = Input(shape=input_shape, name='Input_{}'.format(i))\n if self.input_types[i] == 'categorical':\n n_words = self.k ** 4\n emb_size = (n_words * 2) + 1\n x = Embedding(n_words, emb_size, input_length=input_shape[0])(x)\n inputs.append(x)\n self.inputs_tensors = inputs\n return inputs\n\n def build(self):\n raise NotImplementedError()\n\n\nclass BaseHyperModel(BaseModel, HyperModel):\n def __init__(self, data_list, num_classes):\n super(HyperModel, self).__init__()\n super(BaseModel, self).__init__(data_list, num_classes)\n\n def define_search_space(self):\n raise NotImplementedError()\n\n def build(self, hp):\n raise NotImplementedError()\n\n\nclass BaselineHotCNN(BaseModel):\n def __init__(self, data_list, num_classes):\n super(BaselineHotCNN, self).__init__(data_list, num_classes)\n\n def build(self):\n input_tensor = self.setup_input()[0]\n block1 = conv_pool_block(input_tensor, n_filters=100, k_size=15, pad='same', p_size=2, p_stride=2, activ='relu')\n block2 = conv_pool_block(block1, n_filters=250, k_size=17, pad='same', p_size=2, p_stride=2, activ='relu')\n\n # Flat tensors\n flat = Flatten()(block2)\n\n # Fully connected layers\n dense1 = Dense(128, activation='relu', name='fully_con')(flat)\n\n # Classification layer\n activ = 'sigmoid' if self.num_classes == 1 else 'softmax'\n output = Dense(self.num_classes, activation=activ, name='classification_layer')(dense1)\n self.outputs_tensors.append(output)\n\n # Create model object\n model = models.Model(inputs=self.inputs_tensors, outputs=self.outputs_tensors, name='Baseline_HotCNN_Bacillus')\n return model\n\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.backend.shape" ] ]
TariniHardikar/OpenFermion
[ "1a1538c976d3c867c66c04a7b63766910ed73bf1" ]
[ "src/openfermion/ops/_quadratic_hamiltonian.py" ]
[ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Class and functions to store and manipulate Hamiltonians that are quadratic\nin the fermionic ladder operators.\"\"\"\nfrom __future__ import absolute_import\nfrom scipy.linalg import schur\n\nimport numpy\n\nfrom openfermion.config import EQ_TOLERANCE\nfrom openfermion.ops import FermionOperator, PolynomialTensor\n\n\nclass QuadraticHamiltonianError(Exception):\n pass\n\n\nclass QuadraticHamiltonian(PolynomialTensor):\n \"\"\"Class for storing Hamiltonians that are quadratic in the fermionic\n ladder operators. The operators stored in this class take the form\n\n .. math::\n\n \\sum_{p, q} (M_{pq} - \\mu \\delta_{pq}) a^\\dagger_p a_q\n + \\\\frac12 \\sum_{p, q}\n (\\\\Delta_{pq} a^\\dagger_p a^\\dagger_q + \\\\text{h.c.})\n + \\\\text{constant}\n\n where\n\n - :math:`M` is a Hermitian `n_qubits` x `n_qubits` matrix.\n - :math:`\\\\Delta` is an antisymmetric `n_qubits` x `n_qubits` matrix.\n - :math:`\\mu` is a real number representing the chemical potential.\n - :math:`\\delta_{pq}` is the Kronecker delta symbol.\n\n We separate the chemical potential :math:`\\mu` from :math:`M` so that\n we can use it to adjust the expectation value of the total number of\n particles.\n\n Attributes:\n chemical_potential(float): The chemical potential :math:`\\mu`.\n \"\"\"\n\n def __init__(self, constant, hermitian_part,\n antisymmetric_part=None, chemical_potential=0.):\n \"\"\"\n Initialize the QuadraticHamiltonian class.\n\n Args:\n constant(float): A constant term in the operator.\n hermitian_part(ndarray): The matrix :math:`M`, which represents the\n coefficients of the particle-number-conserving terms.\n This is an `n_qubits` x `n_qubits` numpy array of complex\n numbers.\n antisymmetric_part(ndarray): The matrix :math:`\\\\Delta`,\n which represents the coefficients of the\n non-particle-number-conserving terms.\n This is an `n_qubits` x `n_qubits` numpy array of complex\n numbers.\n chemical_potential(float): The chemical potential :math:`\\mu`.\n \"\"\"\n n_qubits = hermitian_part.shape[0]\n\n # Initialize combined Hermitian part\n if not chemical_potential:\n combined_hermitian_part = hermitian_part\n else:\n combined_hermitian_part = (\n hermitian_part - chemical_potential * numpy.eye(n_qubits))\n\n # Initialize the PolynomialTensor\n if antisymmetric_part is None:\n super(QuadraticHamiltonian, self).__init__(\n {(): constant, (1, 0): combined_hermitian_part})\n else:\n super(QuadraticHamiltonian, self).__init__(\n {(): constant, (1, 0): combined_hermitian_part,\n (1, 1): 0.5 * antisymmetric_part,\n (0, 0): -0.5 * antisymmetric_part.conj()})\n\n # Add remaining attributes\n self.chemical_potential = chemical_potential\n\n @property\n def combined_hermitian_part(self):\n \"\"\"The Hermitian part including the chemical potential.\"\"\"\n return self.n_body_tensors[1, 0]\n\n @property\n def antisymmetric_part(self):\n \"\"\"The antisymmetric part.\"\"\"\n if (1, 1) in self.n_body_tensors:\n return 2. 
* self.n_body_tensors[1, 1]\n else:\n return numpy.zeros((self.n_qubits, self.n_qubits), complex)\n\n @property\n def hermitian_part(self):\n \"\"\"The Hermitian part not including the chemical potential.\"\"\"\n return (self.combined_hermitian_part +\n self.chemical_potential * numpy.eye(self.n_qubits))\n\n @property\n def conserves_particle_number(self):\n \"\"\"Whether this Hamiltonian conserves particle number.\"\"\"\n discrepancy = numpy.max(numpy.abs(self.antisymmetric_part))\n return discrepancy < EQ_TOLERANCE\n\n def add_chemical_potential(self, chemical_potential):\n \"\"\"Increase (or decrease) the chemical potential by some value.\"\"\"\n self.n_body_tensors[1, 0] -= (chemical_potential *\n numpy.eye(self.n_qubits))\n self.chemical_potential += chemical_potential\n\n def orbital_energies(self, non_negative=False):\n \"\"\"Return the orbital energies.\n\n Any quadratic Hamiltonian is unitarily equivalent to a Hamiltonian\n of the form\n\n .. math::\n\n \\sum_{j} \\\\varepsilon_j b^\\dagger_j b_j + \\\\text{constant}.\n\n We call the :math:`\\\\varepsilon_j` the orbital energies.\n The eigenvalues of the Hamiltonian are sums of subsets of the\n orbital energies (up to the additive constant).\n\n Args:\n non_negative(bool): If True, always return a list of orbital\n energies that are non-negative. This option is ignored if\n the Hamiltonian does not conserve particle number, in which\n case the returned orbital energies are always non-negative.\n\n Returns\n -------\n orbital_energies(ndarray)\n A one-dimensional array containing the :math:`\\\\varepsilon_j`\n constant(float)\n The constant\n \"\"\"\n if self.conserves_particle_number and not non_negative:\n hermitian_matrix = self.combined_hermitian_part\n orbital_energies, diagonalizing_unitary = numpy.linalg.eigh(\n hermitian_matrix)\n constant = self.constant\n else:\n majorana_matrix, majorana_constant = self.majorana_form()\n canonical, orthogonal = antisymmetric_canonical_form(\n majorana_matrix)\n orbital_energies = canonical[\n range(self.n_qubits), range(self.n_qubits, 2 * self.n_qubits)]\n constant = -0.5 * numpy.sum(orbital_energies) + majorana_constant\n\n return orbital_energies, constant\n\n def ground_energy(self):\n \"\"\"Return the ground energy.\"\"\"\n _, constant = self.orbital_energies(non_negative=True)\n return constant\n\n def majorana_form(self):\n \"\"\"Return the Majorana represention of the Hamiltonian.\n\n Any quadratic Hamiltonian can be written in the form\n\n .. math::\n\n \\\\frac{i}{2} \\sum_{j, k} A_{jk} f_j f_k + \\\\text{constant}\n\n where the :math:`f_i` are normalized Majorana fermion operators:\n\n .. math::\n\n f_j = \\\\frac{1}{\\\\sqrt{2}} (a^\\dagger_j + a_j)\n\n f_{j + N} = \\\\frac{i}{\\\\sqrt{2}} (a^\\dagger_j - a_j)\n\n and :math:`A` is a (2 * `n_qubits`) x (2 * `n_qubits`) real\n antisymmetric matrix. 
This function returns the matrix\n :math:`A` and the constant.\n \"\"\"\n hermitian_part = self.combined_hermitian_part\n antisymmetric_part = self.antisymmetric_part\n\n # Compute the Majorana matrix using block matrix manipulations\n majorana_matrix = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits))\n # Set upper left block\n majorana_matrix[:self.n_qubits, :self.n_qubits] = numpy.real(-0.5j * (\n hermitian_part - hermitian_part.conj() +\n antisymmetric_part - antisymmetric_part.conj()))\n # Set upper right block\n majorana_matrix[:self.n_qubits, self.n_qubits:] = numpy.real(0.5 * (\n hermitian_part + hermitian_part.conj() -\n antisymmetric_part - antisymmetric_part.conj()))\n # Set lower left block\n majorana_matrix[self.n_qubits:, :self.n_qubits] = numpy.real(-0.5 * (\n hermitian_part + hermitian_part.conj() +\n antisymmetric_part + antisymmetric_part.conj()))\n # Set lower right block\n majorana_matrix[self.n_qubits:, self.n_qubits:] = numpy.real(-0.5j * (\n hermitian_part - hermitian_part.conj() -\n antisymmetric_part + antisymmetric_part.conj()))\n\n # Compute the constant\n majorana_constant = (0.5 * numpy.real(numpy.trace(hermitian_part)) +\n self.n_body_tensors[()])\n\n return majorana_matrix, majorana_constant\n\n def diagonalizing_bogoliubov_transform(self):\n \"\"\"Compute the unitary that diagonalizes a quadratic Hamiltonian.\n\n Any quadratic Hamiltonian can be rewritten in the form\n\n .. math::\n\n \\sum_{j} \\\\varepsilon_j b^\\dagger_j b_j + \\\\text{constant},\n\n where the :math:`b_j` are a new set fermionic operators\n that satisfy the canonical anticommutation relations.\n The new fermionic operators are linear combinations of the\n original ones:\n\n .. math::\n\n \\\\begin{pmatrix}\n b^\\dagger_1 \\\\\\\\\n \\\\vdots \\\\\\\\\n b^\\dagger_N \\\\\\\\\n b_1 \\\\\\\\\n \\\\vdots \\\\\\\\\n b_N\n \\\\end{pmatrix}\n = W\n \\\\begin{pmatrix}\n a^\\dagger_1 \\\\\\\\\n \\\\vdots \\\\\\\\\n a^\\dagger_N \\\\\\\\\n a_1 \\\\\\\\\n \\\\vdots \\\\\\\\\n a_N\n \\\\end{pmatrix},\n\n where :math:`W` is a :math:`2N \\\\times 2N` unitary matrix.\n This method returns the matrix :math:`W`.\n\n Returns:\n diagonalizing_unitary (ndarray):\n A (2 * `n_qubits`) x (2 * `n_qubits`) matrix representing\n the transformation :math:`W` of the fermionic ladder operators.\n \"\"\"\n majorana_matrix, majorana_constant = self.majorana_form()\n\n # Get the orthogonal transformation that puts majorana_matrix\n # into canonical form\n canonical, orthogonal = antisymmetric_canonical_form(majorana_matrix)\n\n # Create the matrix that converts between fermionic ladder and\n # Majorana bases\n normalized_identity = (numpy.eye(self.n_qubits, dtype=complex) /\n numpy.sqrt(2.))\n majorana_basis_change = numpy.eye(\n 2 * self.n_qubits, dtype=complex) / numpy.sqrt(2.)\n majorana_basis_change[self.n_qubits:, self.n_qubits:] *= -1.j\n majorana_basis_change[:self.n_qubits,\n self.n_qubits:] = normalized_identity\n majorana_basis_change[self.n_qubits:,\n :self.n_qubits] = 1.j * normalized_identity\n\n # Compute the unitary and return\n diagonalizing_unitary = majorana_basis_change.T.conj().dot(\n orthogonal.dot(majorana_basis_change))\n\n return diagonalizing_unitary\n\n\ndef antisymmetric_canonical_form(antisymmetric_matrix):\n \"\"\"Compute the canonical form of an antisymmetric matrix.\n\n The input is a real, antisymmetric n x n matrix A, where n is even.\n Its canonical form is::\n\n A = R^T C R\n\n where R is a real, orthogonal matrix and C has the form::\n\n [ 0 D ]\n [ -D 0 ]\n\n where D is a diagonal 
matrix with nonnegative entries.\n\n Args:\n antisymmetric_matrix(ndarray): An antisymmetric matrix with even\n dimension.\n\n Returns:\n canonical(ndarray): The canonical form C of antisymmetric_matrix\n orthogonal(ndarray): The orthogonal transformation R.\n \"\"\"\n m, p = antisymmetric_matrix.shape\n\n if m != p or p % 2 != 0:\n raise ValueError('The input matrix must be square with even '\n 'dimension.')\n\n # Check that input matrix is antisymmetric\n matrix_plus_transpose = antisymmetric_matrix + antisymmetric_matrix.T\n maxval = numpy.max(numpy.abs(matrix_plus_transpose))\n if maxval > EQ_TOLERANCE:\n raise ValueError('The input matrix must be antisymmetric.')\n\n # Compute Schur decomposition\n canonical, orthogonal = schur(antisymmetric_matrix, output='real')\n\n # The returned form is block diagonal; we need to permute rows and columns\n # to put it into the form we want\n n = p // 2\n for i in range(1, n, 2):\n swap_rows(canonical, i, n + i - 1)\n swap_columns(canonical, i, n + i - 1)\n swap_columns(orthogonal, i, n + i - 1)\n if n % 2 != 0:\n swap_rows(canonical, n - 1, n + i)\n swap_columns(canonical, n - 1, n + i)\n swap_columns(orthogonal, n - 1, n + i)\n\n # Now we permute so that the upper right block is non-negative\n for i in range(n):\n if canonical[i, n + i] < -EQ_TOLERANCE:\n swap_rows(canonical, i, n + i)\n swap_columns(canonical, i, n + i)\n swap_columns(orthogonal, i, n + i)\n\n # Now we permute so that the nonzero entries are ordered by magnitude\n # We use insertion sort\n diagonal = canonical[range(n), range(n, 2 * n)]\n for i in range(n):\n # Insert the smallest element from the unsorted part of the list into\n # index i\n arg_min = numpy.argmin(diagonal[i:]) + i\n if arg_min != i:\n # Permute the upper right block\n swap_rows(canonical, i, arg_min)\n swap_columns(canonical, n + i, n + arg_min)\n swap_columns(orthogonal, n + i, n + arg_min)\n # Permute the lower left block\n swap_rows(canonical, n + i, n + arg_min)\n swap_columns(canonical, i, arg_min)\n swap_columns(orthogonal, i, arg_min)\n # Update diagonal\n swap_rows(diagonal, i, arg_min)\n\n return canonical, orthogonal.T\n\n\ndef swap_rows(M, i, j):\n \"\"\"Swap rows i and j of matrix M.\"\"\"\n if len(M.shape) == 1:\n M[i], M[j] = M[j], M[i]\n else:\n row_i = M[i, :].copy()\n row_j = M[j, :].copy()\n M[i, :], M[j, :] = row_j, row_i\n\n\ndef swap_columns(M, i, j):\n \"\"\"Swap columns i and j of matrix M.\"\"\"\n if len(M.shape) == 1:\n M[i], M[j] = M[j], M[i]\n else:\n column_i = M[:, i].copy()\n column_j = M[:, j].copy()\n M[:, i], M[:, j] = column_j, column_i\n" ]
[ [ "scipy.linalg.schur", "numpy.trace", "numpy.zeros", "numpy.argmin", "numpy.sum", "numpy.linalg.eigh", "numpy.eye", "numpy.abs", "numpy.sqrt" ] ]
vijayrgopu/neo4j-lib
[ "45a5abc43ee057ea0908fba0746727c36ab8f444" ]
[ "neo_lib.py" ]
[ "from contextlib import nullcontext\nimport pandas as pd\nfrom pprint import pprint\n\nfrom neo4j import GraphDatabase, basic_auth\nfrom py2neo import Graph\nempty_cq = \"\"\"\n// Your query goes here\n\n\"\"\"\n'''\nThis is a neo4j library 1.0\n'''\n\nclass Neo_lib:\n def __init__(self, neo_url, neo_user, neo_pwd, neo_database):\n self.driver = GraphDatabase.driver(neo_url, auth=basic_auth(neo_user, neo_pwd))\n self.neo_database = neo_database\n self.graph = Graph(neo_url, auth=(neo_user, neo_pwd))\n\n\n\n def run_cypher(self,cq,parameters=None,limit=10):\n try:\n tran = lambda tx: tx.run(cq,parameters=parameters,limit=limit).data()\n with self.driver.session(database=self.neo_database) as session:\n results = session.write_transaction(tran)\n except Exception as e:\n results = e.message \n return results\n\n def run_cypher_pd(self,cq,parameters=None):\n if cq == empty_cq:\n data = {'Empty CQ': [\"Please enter query and try again\"]}\n result_pd = pd.DataFrame.from_dict(data)\n else:\n try:\n result_pd = self.graph.run(cq,parameters).to_data_frame()\n except Exception as e:\n data = {'Message':[ e.message]}\n result_pd = pd.DataFrame.from_dict(data)\n return result_pd\n\n def reset_db(self):\n self.drop_constraints()\n cq = \"match (n) detach delete n\"\n return self.run_cypher(cq)\n \n\n def get_stats(self):\n cq = \"\"\"\n call apoc.meta.stats() yield labelCount, relTypeCount, propertyKeyCount, nodeCount, relCount\n with labelCount, relTypeCount, propertyKeyCount, nodeCount, relCount\n return labelCount, relTypeCount,propertyKeyCount, nodeCount,relCount\n \"\"\"\n return self.run_cypher_pd(cq)\n\n def checksum(self):\n cq = \"\"\"\n call apoc.meta.stats() yield labelCount, relTypeCount, propertyKeyCount, nodeCount, relCount\n with labelCount, relTypeCount, propertyKeyCount, nodeCount, relCount\n return labelCount+relTypeCount+propertyKeyCount+nodeCount+relCount as checksum\n \"\"\"\n res = self.run_cypher(cq)\n return res[0]['checksum']\n\n def node_count(self):\n cq = \"\"\"\n match (n) return count(n) as count\n \"\"\"\n return self.run_cypher_pd(cq)\n\n\n def get_stats_all(self):\n cq = \"\"\"\n call apoc.meta.stats()\n \"\"\"\n return self.run_cypher_pd(cq)\n\n\n def schema_view(self):\n cq = \"CALL db.schema.visualization()\"\n print (\"Run {} in Neo4j Browser to see a graphical view\".format(cq))\n return self.run_cypher(cq)\n\n\n def label_count(self):\n result = {\"Label\": [], \"Count\": []}\n for label in self.graph.run(\"CALL db.labels()\").to_series():\n query = f\"MATCH (:`{label}`) RETURN count(*) AS count\"\n count = self.graph.run(query).to_data_frame().iloc[0]['count']\n result[\"Label\"].append(label)\n result[\"Count\"].append(count)\n nodes_df = pd.DataFrame(data=result)\n return nodes_df\n\n def relationship_count(self):\n result = {\"From\":[], \"Relationship\": [], \"To\":[], \"Count\": []}\n x = self.schema_view()\n y = x[0]['relationships']\n for i in y:\n rel = i[1]\n query = f\"MATCH ()-[r:`{rel}`]-() RETURN count(r) AS count\"\n count = self.graph.run(query).to_data_frame().iloc[0]['count']\n result[\"From\"].append(i[0]['name'])\n result[\"Relationship\"].append(rel)\n result[\"To\"].append(i[2]['name'])\n result[\"Count\"].append(count)\n rels_df = pd.DataFrame(data=result)\n return rels_df\n\n def drop_constraints(self):\n cq = \"SHOW CONSTRAINTS\"\n x = self.run_cypher(cq)\n for c in x:\n cq = \"drop constraint \" + c[\"name\"]\n print(\"Dropping Constraint \", c[\"name\"])\n self.run_cypher(cq)" ]
[ [ "pandas.DataFrame.from_dict", "pandas.DataFrame" ] ]
wsustcid/FlowDriveNet
[ "3604495269ae45e5b43964046104f685ec66e383" ]
[ "eval.py" ]
[ "'''\n@Author: Shuai Wang\n@Github: https://github.com/wsustcid\n@Version: 1.0.0\n@Date: 2020-09-11 23:42:23\n@LastEditTime: 2020-10-13 22:32:20\n'''\n\nimport os\nimport sys\nimport argparse\nfrom datetime import datetime\nimport time\nfrom tqdm import tqdm\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(base_dir)\nfrom utils.tf_util import log_string\n\nfrom data_gen import DataLoader\nfrom models.flowdrivenet import FlowDriveNet\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_root', default='/media/ubuntu16/Documents/Datasets/Udacity/CH2',\n help='data_root path [default: local path]')\nparser.add_argument('--input_cfg', default='GRAY', \n help='Input type: GRAY, GRAYF, GRAYF-T, XYZ, XYZF, XYZF-T, GRAYF-XYZF-T')\nparser.add_argument('--model_cfg', default='VFE',\n help='Model type: VFE, VFE-TFP, PFE, PFE-TFP, VFE-PFE-TFP')\nparser.add_argument('--height', type=int, default=200, help='img height')\nparser.add_argument('--width', type=int, default=200, help='img width')\nparser.add_argument('--seq_len', type=int, default=5, help='sel length')\nparser.add_argument('--aug_cfg', default='None', help='None, IA, RP, SC, BA, BS')\n#parser.add_argument('--use_side_cam', default=False, action='store_true')\nparser.add_argument('--num_point', type=int, default=10000, help='Point N')\nparser.add_argument('--log_dir', default='test',\n help='Log dir [default: test]')\nparser.add_argument('--batch_size', type=int, default=1,\n help='Batch Size during training [default: 16]')\nparser.add_argument('--decay_steps', type=int, default=300000,\n help='Decay step for lr decay [default: 200000]') # decay_steps = n_train * epochs\nparser.add_argument('--decay_rate', type=float, default=0.7,\n help='Decay rate for lr decay [default: 0.7]')\nparser.add_argument('--model_file', default='/media/ubuntu16/F/FlowDriveNet/logs/VFE/gray_base/model_best.ckpt',\n help='the model path to be evaluated')\n\n\nFLAGS = parser.parse_args()\n\nBATCH_SIZE = FLAGS.batch_size\n\nlog_dir = os.path.join(base_dir, 'logs', FLAGS.log_dir)\nos.makedirs(log_dir, exist_ok=True)\ntest_log_dir = os.path.join(log_dir, 'log_test.txt')\nlog_string(test_log_dir, str(FLAGS)+'\\n')\n\n# \ndataloader = DataLoader(FLAGS.data_root, FLAGS.input_cfg, \n FLAGS.height, FLAGS.width,\n FLAGS.seq_len, \n FLAGS.num_point,\n FLAGS.aug_cfg)\nmodel = FlowDriveNet(FLAGS.input_cfg, FLAGS.model_cfg, \n FLAGS.height, FLAGS.width, FLAGS.seq_len, FLAGS.num_point)\n\ndef get_bn_decay(batch):\n bn_momentum = tf.train.exponential_decay(\n 0.5,\n batch*BATCH_SIZE,\n float(FLAGS.decay_steps),\n 0.5,\n staircase=True)\n bn_decay = tf.minimum(0.99, 1 - bn_momentum)\n return bn_decay\n\ndef eval():\n with tf.Graph().as_default():\n image_pl, points_pl, _ = model.get_inputs_pl(BATCH_SIZE)\n is_training_pl = tf.placeholder(tf.bool, shape=())\n # define global_step; optimizer will increase it in every training loop\n batch = tf.get_variable('batch', [], \n initializer=tf.constant_initializer(0),\n trainable=False)\n bn_decay = get_bn_decay(batch) \n \n pred = model.get_model(image_pl, points_pl, is_training_pl, bn_decay)\n\n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = False\n sess = tf.Session(config=config)\n\n # Init variables\n init = tf.global_variables_initializer()\n sess.run(init)\n \n # restore model\n saver = tf.train.Saver()\n saver.restore(sess, 
FLAGS.model_file)\n\n # save all tensor\n ops = {'image_pl': image_pl,\n 'points_pl': points_pl,\n 'is_training_pl': is_training_pl,\n 'pred': pred}\n\n ## evaluation \n is_training = False\n num_batches = dataloader.num_test // BATCH_SIZE\n rmse_angle_sum = 0.0\n rmse_speed_sum = 0.0\n result_all = np.zeros((0,4)) # pred_a, pred_s, label_a, label_s\n \n time_sum = 0.0\n for i in tqdm(range(num_batches)):\n X_image_batch, X_cloud_batch, y_batch = dataloader.load_test_batch(BATCH_SIZE)\n \n feed_dict = {ops['image_pl']: X_image_batch,\n ops['points_pl']: X_cloud_batch,\n ops['is_training_pl']: is_training}\n t1 = time.time()\n pred_batch = sess.run(ops['pred'],feed_dict=feed_dict)\n t2 = time.time()\n time_sum += (t2-t1) \n result_batch = np.hstack((pred_batch, y_batch))\n result_all = np.concatenate((result_all, result_batch), axis=0)\n \n \n np.savetxt(os.path.join(log_dir, 'results.csv'), result_all, delimiter=\",\")\n # b = np.loadtxt(\"temp.csv\", delimiter=\",\")\n\n rmse_angle = np.sqrt(np.mean(np.square(result_all[:,0] - result_all[:,2])))\n rmse_speed = np.sqrt(np.mean(np.square(result_all[:,1] - result_all[:,3])))\n log_string(test_log_dir, 'Test rmse_angle: %f' % (rmse_angle))\n log_string(test_log_dir, 'Test rmse_speed: %f' % (rmse_speed))\n log_string(test_log_dir, 'Test rmse_average: %f' % ((rmse_angle+rmse_speed)/2))\n log_string(test_log_dir, 'Test FPS: %f' % (1/(time_sum/num_batches)))\n\n\nif __name__ == \"__main__\":\n eval()\n" ]
[ [ "numpy.concatenate", "numpy.square", "tensorflow.minimum", "tensorflow.constant_initializer", "numpy.zeros", "tensorflow.Graph", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.ConfigProto", "tensorflow.placeholder", "numpy.hstack", "tensorflow.global_variables_initializer" ] ]
flowmatters/veneer-py
[ "af551b49038f5f93358b510fb893015c590bf6d4" ]
[ "veneer/losses.py" ]
[ "from types import MethodType\nimport pandas as pd\nimport numpy as np\nfrom .server_side import VeneerNetworkElementActions\nfrom .utils import _quote_string\n\nGET_LOSS_TABLE_SCRIPTLET='''\nignoreExceptions=False\nfn = target.lossFct\nfor row in fn:\n result.append((row.Key,row.Value))\n'''\n\nclass VeneerLossNodeActions(VeneerNetworkElementActions):\n def __init__(self,node_actions):\n self.node_actions = node_actions\n self._name_accessor = self.node_actions._name_accessor\n super(VeneerLossNodeActions, self).__init__(node_actions._ironpy)\n def _build_accessor(self, parameter=None, nodes=None):\n return self.node_actions._build_accessor(parameter,nodes=nodes,node_types='LossNodeModel')\n\n def loss_table(self,node):\n '''\n Retrieve the Loss table for a given loss node\n '''\n code = GET_LOSS_TABLE_SCRIPTLET\n vals = self.apply(code,init='[]',nodes=[node])\n return pd.DataFrame(vals,columns=['inflow','loss'])\n" ]
[ [ "pandas.DataFrame" ] ]
rudolfspetrovs/benchml
[ "896673f387a6bb9b185664ddd54f569a1ba54e51" ]
[ "benchml/models/mod_basic.py" ]
[ "import numpy as np\n\nimport benchml.transforms as btf\nfrom benchml.hyper import BayesianHyper, GridHyper, Hyper\nfrom benchml.models.mod_dscribe import compile_dscribe, compile_dscribe_periodic\n\n\ndef compile_null(**kwargs):\n return []\n\n\ndef compile_physchem(custom_fields=None, with_hyper=False, **kwargs):\n if custom_fields is None:\n custom_fields = []\n if with_hyper:\n hyper = BayesianHyper(\n Hyper(\n {\n \"pred.n_estimators\": [10, 200],\n \"pred.max_depth\": [2, 16],\n }\n ),\n convert={\"pred.n_estimators\": \"lambda x: int(x)\", \"pred.max_depth\": \"lambda x: int(x)\"},\n init_points=10,\n n_iter=30,\n )\n else:\n hyper = GridHyper(Hyper({\"pred.max_depth\": [None]}))\n return [\n btf.Module(\n tag=\"physchem\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.Physchem2D(tag=\"Physchem2D\", inputs={\"configs\": \"input.configs\"}),\n btf.PhyschemUser(\n tag=\"PhyschemUser\",\n args={\"fields\": custom_fields},\n inputs={\"configs\": \"input.configs\"},\n ),\n btf.Concatenate(tag=\"desc\", inputs={\"X\": [\"Physchem2D.X\", \"PhyschemUser.X\"]}),\n btf.RandomForestRegressor(tag=\"pred\", inputs={\"X\": \"desc.X\", \"y\": \"input.y\"}),\n ],\n hyper=hyper,\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"pred.y\"},\n ),\n ]\n\n\ndef make_soap_krr(tag):\n return btf.Module(\n tag=tag,\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.UniversalSoapGylmxx(tag=\"descriptor\", inputs={\"configs\": \"input.configs\"}),\n btf.ReduceTypedMatrix(tag=\"reduce\", inputs={\"X\": \"descriptor.X\", \"T\": \"descriptor.T\"}),\n btf.WhitenMatrix(tag=\"whiten\", inputs={\"X\": \"reduce.X\"}),\n btf.KernelDot(tag=\"kernel\", inputs={\"X\": \"whiten.X\"}),\n btf.KernelRidge(\n tag=\"predictor\", args={\"alpha\": None}, inputs={\"K\": \"kernel.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=GridHyper(\n Hyper(\n {\n \"predictor.alpha\": np.logspace(-7, +7, 15),\n }\n )\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"predictor.y\"},\n )\n\n\ndef compile_soap(basic=False, **kwargs):\n if basic:\n hyper = GridHyper(\n Hyper({\"descriptor.normalize\": [False]}),\n Hyper({\"descriptor.mode\": [\"minimal\"]}),\n Hyper({\"descriptor.crossover\": [True]}),\n Hyper({\"reduce.reduce_op\": [\"sum\"]}),\n Hyper({\"reduce.normalize\": [True]}),\n Hyper({\"reduce.reduce_by_type\": [False]}),\n Hyper({\"whiten.centre\": [False]}),\n Hyper({\"whiten.scale\": [False]}),\n Hyper({\"predictor.power\": [2]}),\n )\n else:\n hyper = GridHyper(\n Hyper({\"descriptor.normalize\": [True]}),\n Hyper({\"descriptor.mode\": [\"minimal\", \"smart\", \"longrange\"]}),\n Hyper({\"descriptor.crossover\": [False, True]}),\n Hyper({\"reduce.reduce_op\": [\"mean\"]}), # + \"sum\"\n Hyper({\"reduce.normalize\": [True]}),\n Hyper({\"reduce.reduce_by_type\": [False]}), # + True\n Hyper({\"whiten.centre\": [False]}), # + True\n Hyper({\"whiten.scale\": [False]}), # + True\n Hyper({\"predictor.power\": [2]}),\n )\n models = []\n for hidx, updates in enumerate(hyper):\n model = make_soap_krr(tag=\"soap_krr_%02d\" % hidx)\n model.hyperUpdate(updates)\n models.append(model)\n return models\n\n\ndef compile_morgan_krr(**kwargs):\n return [\n btf.Module(\n tag=\"morgan_krr\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.MorganFP(\n tag=\"desc\",\n args={\"length\": 4096, \"radius\": 2, \"normalize\": True},\n inputs={\"configs\": \"input.configs\"},\n ),\n btf.KernelDot(tag=\"kern\", inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2}, inputs={\"K\": 
\"kern.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=GridHyper(\n Hyper(\n {\n \"KernelRidge.alpha\": np.logspace(-6, +1, 8),\n }\n ),\n Hyper({\"KernelRidge.power\": [2.0]}),\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n )\n ]\n\n\ndef compile_morgan(**kwargs):\n return [\n # Macro example\n # >>> Module(\n # >>> tag=\"morgan_krrx2\",\n # >>> transforms=[\n # >>> ExtXyzInput(tag=\"input\"),\n # >>> MorganKernel(\n # >>> tag=\"A\",\n # >>> args={\"x.fp_length\": 1024, \"x.fp_radius\": 2},\n # >>> inputs={\"x.configs\": \"input.configs\"}),\n # >>> MorganKernel(\n # >>> tag=\"B\",\n # >>> args={\"x.fp_length\": 2048, \"x.fp_radius\": 4},\n # >>> inputs={\"x.configs\": \"input.configs\"}),\n # >>> Add(\n # >>> args={\"coeffs\": [ 0.5, 0.5 ]},\n # >>> inputs={\"X\": [\"A/k.K\", \"B/k.K\"]}),\n # >>> KernelRidge(\n # >>> args={\"alpha\": 0.1, \"power\": 2},\n # >>> inputs={\"K\": \"Add.y\", \"y\": \"input.y\"})\n # >>> ],\n # >>> hyper=BayesianHyper(\n # >>> Hyper({ \"Add.coeffs\":\n # >>> list(map(lambda f: [ f, 1.-f ], np.linspace(0.25, 0.75, 3)))\n # >>> }),\n # >>> Hyper({ \"KernelRidge.alpha\":\n # >>> np.linspace(-3,+1, 5),\n # >>> }),\n # >>> n_iter=40,\n # >>> init_points=10,\n # >>> convert={\n # >>> \"KernelRidge.alpha\": lambda p: 10**p}),\n # >>> broadcast={ \"meta\": \"input.meta\" },\n # >>> outputs={ \"y\": \"KernelRidge.y\" },\n # >>> ),\n btf.Module(\n tag=\"morgan_krr_ext\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.MorganFP(\n tag=\"desc\",\n args={\"length\": 4096, \"radius\": 2},\n inputs={\"configs\": \"input.configs\"},\n ),\n btf.KernelDot(tag=\"kern\", inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2}, inputs={\"K\": \"kern.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=GridHyper(\n Hyper({\"desc.radius\": [1, 2, 3, 4]}),\n Hyper(\n {\n \"KernelRidge.alpha\": np.logspace(-5, +1, 7),\n }\n ),\n Hyper({\"KernelRidge.power\": [2.0]}),\n ),\n # >>> hyper=BayesianHyper(\n # >>> Hyper({ \"KernelRidge.alpha\": np.linspace(-3,+1, 5), }),\n # >>> Hyper({ \"KernelRidge.power\": [ 1., 4. 
] }),\n # >>> n_iter=40,\n # >>> init_points=10,\n # >>> convert={\n # >>> \"KernelRidge.alpha\": \"lambda p: 10**p\"\n # >>> }),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n ),\n btf.Module(\n tag=\"morgan_ridge\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.MorganFP(args={\"length\": 2048}, inputs={\"configs\": \"input.configs\"}),\n btf.Ridge(inputs={\"X\": \"MorganFP.X\", \"y\": \"input.y\"}),\n ],\n hyper=BayesianHyper(\n Hyper({\"Ridge.alpha\": np.linspace(-2, 2, 5)}),\n convert={\"Ridge.alpha\": \"lambda p: 10**p\"},\n ),\n outputs={\"y\": \"Ridge.y\"},\n ),\n btf.Module(\n tag=\"morgan_gb\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.MorganFP(args={\"length\": 2048}, inputs={\"configs\": \"input.configs\"}),\n btf.GradientBoosting(inputs={\"X\": \"MorganFP.X\", \"y\": \"input.y\"}),\n ],\n hyper=GridHyper(Hyper({\"GradientBoosting.max_depth\": [1, 3, 5]})),\n outputs={\"y\": \"GradientBoosting.y\"},\n ),\n ]\n\n\ndef compile_gylm_match(**kwargs):\n return [\n btf.Module(\n tag=\"gylm_smooth_match\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.GylmAtomic(tag=\"desc\", inputs={\"configs\": \"input.configs\"}),\n btf.KernelSmoothMatch(inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2},\n inputs={\"K\": \"KernelSmoothMatch.K\", \"y\": \"input.y\"},\n ),\n ],\n hyper=GridHyper(\n Hyper(\n {\n \"KernelRidge.alpha\": np.logspace(-5, +1, 7),\n }\n ),\n Hyper({\"KernelRidge.power\": [2.0]}),\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n ),\n ]\n\n\ndef compile_gylm(**kwargs):\n return [\n btf.Module(\n tag=\"gylm\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.GylmAverage(tag=\"desc\", inputs={\"configs\": \"input.configs\"}),\n btf.KernelDot(inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2}, inputs={\"K\": \"KernelDot.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=BayesianHyper(\n Hyper(\n {\n \"KernelRidge.alpha\": np.linspace(-5, +1, 7),\n }\n ),\n Hyper({\"KernelRidge.power\": [1.0, 4.0]}),\n init_points=10,\n n_iter=30,\n convert={\"KernelRidge.alpha\": \"lambda p: 10**p\"},\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n ),\n ]\n\n\ndef compile_gylm_grid(**kwargs):\n return [\n btf.Module(\n tag=\"gylm_grid\",\n transforms=[\n btf.ExtXyzInput(tag=\"input\"),\n btf.GylmAverage(tag=\"desc\", inputs={\"configs\": \"input.configs\"}),\n btf.KernelDot(inputs={\"X\": \"desc.X\"}),\n btf.KernelRidge(\n args={\"alpha\": 1e-5, \"power\": 2}, inputs={\"K\": \"KernelDot.K\", \"y\": \"input.y\"}\n ),\n ],\n hyper=GridHyper(\n Hyper(\n {\n \"KernelRidge.alpha\": np.logspace(-5, +1, 7),\n }\n ),\n Hyper({\"KernelRidge.power\": [2.0]}),\n init_points=10,\n n_iter=30,\n convert={\"KernelRidge.alpha\": \"lambda p: 10**p\"},\n ),\n broadcast={\"meta\": \"input.meta\"},\n outputs={\"y\": \"KernelRidge.y\"},\n ),\n ]\n\n\ndef register_all():\n return {\n \"dscribe\": compile_dscribe,\n \"dscribe_periodic\": compile_dscribe_periodic,\n \"ecfp\": compile_morgan,\n \"gylm\": compile_gylm,\n \"gylm_match\": compile_gylm_match,\n \"gylm_grid\": compile_gylm_grid,\n \"morgan_krr\": compile_morgan_krr,\n \"null\": compile_null,\n \"physchem\": compile_physchem,\n \"soap\": compile_soap,\n }\n" ]
[ [ "numpy.linspace", "numpy.logspace" ] ]
catnlp/metaLSTM
[ "08b3086ebc558b936898022dd7eea7d726e6d491" ]
[ "NER/Module/crf.py" ]
[ "# encoding:utf-8\n'''\n@Author: catnlp\n@Email: [email protected]\n@Time: 2018/5/2 15:02\n'''\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nSTART_TAG = -2\nSTOP_TAG = -1\n\ndef log_sum_exp(vec, m_size):\n _, idx = torch.max(vec, 1)\n max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size)\n return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1, m_size)\n\nclass CRF(nn.Module):\n def __init__(self, tagset_size, gpu):\n super(CRF, self).__init__()\n print('---build batched CRF---')\n self.tagset_size = tagset_size\n self.gpu = gpu\n\n init_transitions = torch.zeros(self.tagset_size+2, self.tagset_size+2)\n init_transitions[:, START_TAG] = -1000.0\n init_transitions[STOP_TAG, :] = -1000.0\n if gpu:\n init_transitions = init_transitions.cuda()\n self.transitions = nn.Parameter(init_transitions)\n\n def _calculate_PZ(self, feats, mask):\n batch_size = feats.size(0)\n seq_len = feats.size(1)\n tag_size = feats.size(2)\n assert(tag_size == self.tagset_size+2)\n mask = mask.transpose(1, 0).contiguous()\n ins_num = seq_len * batch_size\n\n feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)\n scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)\n scores = scores.view(seq_len, batch_size, tag_size, tag_size)\n\n seq_iter = enumerate(scores)\n _, inivalues = seq_iter.__next__()\n\n partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size, 1)\n for idx, cur_values in seq_iter:\n cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)\n cur_partition = log_sum_exp(cur_values, tag_size)\n\n mask_idx = mask[idx, :].view(batch_size, 1).expand(batch_size, tag_size)\n masked_cur_partition = cur_partition.masked_select(mask_idx)\n mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)\n\n partition.masked_scatter_(mask_idx, masked_cur_partition)\n cur_values = self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)\n cur_partition = log_sum_exp(cur_values, tag_size)\n final_partition = cur_partition[:, STOP_TAG]\n return final_partition.sum(), scores\n\n def viterbi_decode(self, feats, mask):\n batch_size = feats.size(0)\n seq_len = feats.size(1)\n tag_size = feats.size(2)\n assert(tag_size == self.tagset_size+2)\n\n length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long()\n mask = mask.transpose(1, 0).contiguous()\n ins_num = seq_len * batch_size\n\n feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)\n scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)\n scores = scores.view(seq_len, batch_size, tag_size, tag_size)\n\n seq_iter = enumerate(scores)\n back_points = list()\n partition_history = list()\n mask = (1 - mask.long()).byte()\n _, inivalues = seq_iter.__next__()\n partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size)\n partition_history.append(partition)\n for idx, cur_values in seq_iter:\n cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)\n partition, cur_bp = torch.max(cur_values, 1)\n partition_history.append(partition)\n\n 
cur_bp.masked_fill_(mask[idx].view(batch_size, 1).expand(batch_size, tag_size), 0)\n back_points.append(cur_bp)\n partition_history = torch.cat(partition_history, 0)\n partition_history = partition_history.view(seq_len, batch_size, -1).transpose(1, 0).contiguous()\n last_position = length_mask.view(batch_size, 1, 1).expand(batch_size, 1, tag_size) - 1\n last_partition = torch.gather(partition_history, 1, last_position).view(batch_size, tag_size, 1)\n last_values = last_partition.expand(batch_size, tag_size, tag_size) + self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size, tag_size)\n _, last_bp = torch.max(last_values, 1)\n pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size)).long()\n if self.gpu:\n pad_zero = pad_zero.cuda()\n back_points.append(pad_zero)\n back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size)\n\n pointer = last_bp[:, STOP_TAG]\n insert_last = pointer.contiguous().view(batch_size, 1, 1).expand(batch_size, 1, tag_size)\n back_points = back_points.transpose(1, 0).contiguous()\n back_points.scatter_(1, last_position, insert_last)\n back_points = back_points.transpose(1, 0).contiguous()\n\n decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size))\n if self.gpu:\n decode_idx = decode_idx.cuda()\n decode_idx[-1] = pointer.data\n for idx in range(len(back_points)-2, -1, -1):\n pointer = torch.gather(back_points[idx], 1, pointer.contiguous().view(batch_size, 1))\n decode_idx[idx] = pointer.data\n path_score = None\n decode_idx = decode_idx.transpose(1, 0)\n return path_score, decode_idx\n\n def forward(self, feats, mask):\n path_score, best_path = self._viterbi_decode(feats, mask)\n return path_score, best_path\n\n def _score_sentence(self, scores, tags, mask):\n batch_size = scores.size(1)\n seq_len = scores.size(0)\n tag_size = scores.size(2)\n\n new_tags = autograd.Variable(torch.LongTensor(batch_size, seq_len))\n if self.gpu:\n new_tags = new_tags.cuda()\n for idx in range(seq_len):\n if idx == 0:\n new_tags[:, 0] = (tag_size - 2) * tag_size + tags[:, 0]\n else:\n new_tags[:, idx] = tags[:, idx-1] * tag_size + tags[:, idx]\n\n end_transition = self.transitions[:, STOP_TAG].contiguous().view(1, tag_size).expand(batch_size, tag_size)\n length_mask = torch.sum(mask, dim=1).view(batch_size, 1).long()\n end_ids = torch.gather(tags, 1, length_mask-1)\n\n end_energy = torch.gather(end_transition, 1, end_ids)\n\n new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1)\n tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len, batch_size)\n tg_energy = tg_energy.masked_select(mask.transpose(1, 0))\n\n gold_score = tg_energy.sum() + end_energy.sum()\n return gold_score\n\n def neg_log_likelihood_loss(self, feats, tags, mask):\n forward_score, scores = self._calculate_PZ(feats, mask)\n gold_score = self._score_sentence(scores, tags, mask)\n return forward_score - gold_score\n" ]
[ [ "torch.zeros", "torch.cat", "torch.gather", "torch.max", "torch.nn.Parameter", "torch.LongTensor", "torch.sum" ] ]
hjc3613/simpletransformers
[ "bce58639f3fa8f45f445b053b5aaae428c3c5429" ]
[ "simpletransformers/classification/classification_model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport logging\nimport math\nimport os\nimport random\nimport warnings\nfrom multiprocessing import cpu_count\n\nimport numpy as np\nfrom scipy.stats import mode, pearsonr\nfrom sklearn.metrics import (\n confusion_matrix,\n label_ranking_average_precision_score,\n matthews_corrcoef,\n mean_squared_error,\n)\nfrom tqdm.auto import tqdm, trange\n\nimport pandas as pd\nimport torch\nfrom simpletransformers.classification.classification_utils import InputExample, convert_examples_to_features\nfrom simpletransformers.classification.transformer_models.albert_model import AlbertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.bert_model import BertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.camembert_model import CamembertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.distilbert_model import DistilBertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.flaubert_model import FlaubertForSequenceClassification\nfrom simpletransformers.classification.transformer_models.roberta_model import RobertaForSequenceClassification\nfrom simpletransformers.classification.transformer_models.xlm_model import XLMForSequenceClassification\nfrom simpletransformers.classification.transformer_models.xlm_roberta_model import XLMRobertaForSequenceClassification\nfrom simpletransformers.classification.transformer_models.xlnet_model import XLNetForSequenceClassification\nfrom simpletransformers.config.global_args import global_args\nfrom simpletransformers.custom_models.models import ElectraForSequenceClassification\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertTokenizer,\n BertConfig,\n BertTokenizer,\n CamembertConfig,\n CamembertTokenizer,\n DistilBertConfig,\n DistilBertTokenizer,\n ElectraConfig,\n ElectraTokenizer,\n FlaubertConfig,\n FlaubertTokenizer,\n RobertaConfig,\n RobertaTokenizer,\n XLMConfig,\n XLMRobertaConfig,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n)\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClassificationModel:\n def __init__(\n self, model_type, model_name, num_labels=None, weight=None, args=None, use_cuda=True, cuda_device=-1, **kwargs,\n ):\n\n \"\"\"\n Initializes a ClassificationModel model.\n\n Args:\n model_type: The type of model (bert, xlnet, xlm, roberta, distilbert)\n model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n num_labels (optional): The number of labels or classes in the dataset.\n weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. 
Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n \"\"\" # noqa: ignore flake8\"\n\n MODEL_CLASSES = {\n \"bert\": (BertConfig, BertForSequenceClassification, BertTokenizer),\n \"xlnet\": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),\n \"roberta\": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),\n \"albert\": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),\n \"camembert\": (CamembertConfig, CamembertForSequenceClassification, CamembertTokenizer),\n \"xlmroberta\": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),\n \"flaubert\": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),\n \"electra\": (ElectraConfig, ElectraForSequenceClassification, ElectraTokenizer),\n }\n\n if args and \"manual_seed\" in args:\n random.seed(args[\"manual_seed\"])\n np.random.seed(args[\"manual_seed\"])\n torch.manual_seed(args[\"manual_seed\"])\n if \"n_gpu\" in args and args[\"n_gpu\"] > 0:\n torch.cuda.manual_seed_all(args[\"manual_seed\"])\n\n self.args = {\n \"sliding_window\": False,\n \"tie_value\": 1,\n \"stride\": 0.8,\n \"regression\": False,\n }\n\n self.args.update(global_args)\n\n saved_model_args = self._load_model_args(model_name)\n if saved_model_args:\n self.args.update(saved_model_args)\n\n if args:\n self.args.update(args)\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n if num_labels:\n self.config = config_class.from_pretrained(model_name, num_labels=num_labels, **self.args[\"config\"])\n self.num_labels = num_labels\n else:\n self.config = config_class.from_pretrained(model_name, **self.args[\"config\"])\n self.num_labels = self.config.num_labels\n self.weight = weight\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \" Make sure CUDA is available or set use_cuda=False.\"\n )\n else:\n self.device = \"cpu\"\n\n if self.weight:\n self.model = model_class.from_pretrained(\n model_name, config=self.config, weight=torch.Tensor(self.weight).to(self.device), **kwargs,\n )\n else:\n self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)\n\n self.results = {}\n\n if not use_cuda:\n self.args[\"fp16\"] = False\n\n self.tokenizer = tokenizer_class.from_pretrained(\n model_name, do_lower_case=self.args[\"do_lower_case\"], **kwargs\n )\n\n self.args[\"model_name\"] = model_name\n self.args[\"model_type\"] = model_type\n\n if model_type in [\"camembert\", \"xlmroberta\"]:\n warnings.warn(\n f\"use_multiprocessing automatically disabled as {model_type}\"\n \" fails when using multiprocessing for feature conversion.\"\n )\n self.args[\"use_multiprocessing\"] = False\n\n if self.args[\"wandb_project\"] and not wandb_available:\n warnings.warn(\"wandb_project specified but wandb is not available. 
Wandb disabled.\")\n self.args[\"wandb_project\"] = None\n\n def train_model(\n self,\n train_df,\n multi_label=False,\n output_dir=None,\n show_running_loss=True,\n args=None,\n eval_df=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model using 'train_df'\n\n Args:\n train_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,\n the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be trained on this Dataframe.\n output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n eval_df (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n None\n \"\"\" # noqa: ignore flake8\"\n\n if args:\n self.args.update(args)\n\n if self.args[\"silent\"]:\n show_running_loss = False\n\n if self.args[\"evaluate_during_training\"] and eval_df is None:\n raise ValueError(\n \"evaluate_during_training is enabled but eval_df is not specified.\"\n \" Pass eval_df to model.train_model() if using evaluate_during_training.\"\n )\n\n if not output_dir:\n output_dir = self.args[\"output_dir\"]\n\n if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args[\"overwrite_output_dir\"]:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \" Use --overwrite_output_dir to overcome.\".format(output_dir)\n )\n\n self._move_model_to_device()\n\n if \"text\" in train_df.columns and \"labels\" in train_df.columns:\n train_examples = [\n InputExample(i, text, None, label)\n for i, (text, label) in enumerate(zip(train_df[\"text\"], train_df[\"labels\"]))\n ]\n elif \"text_a\" in train_df.columns and \"text_b\" in train_df.columns:\n train_examples = [\n InputExample(i, text_a, text_b, label)\n for i, (text_a, text_b, label) in enumerate(\n zip(train_df[\"text_a\"], train_df[\"text_b\"], train_df[\"labels\"])\n )\n ]\n else:\n warnings.warn(\n \"Dataframe headers not specified. 
Falling back to using column 0 as text and column 1 as labels.\"\n )\n train_examples = [\n InputExample(i, text, None, label)\n for i, (text, label) in enumerate(zip(train_df.iloc[:, 0], train_df.iloc[:, 1]))\n ]\n\n train_dataset = self.load_and_cache_examples(train_examples, verbose=verbose)\n\n os.makedirs(output_dir, exist_ok=True)\n\n global_step, tr_loss = self.train(\n train_dataset,\n output_dir,\n multi_label=multi_label,\n show_running_loss=show_running_loss,\n eval_df=eval_df,\n verbose=verbose,\n **kwargs,\n )\n\n # model_to_save = self.model.module if hasattr(self.model, \"module\") else self.model\n # model_to_save.save_pretrained(output_dir)\n # self.tokenizer.save_pretrained(output_dir)\n # torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n self._save_model()\n\n if verbose:\n logger.info(\" Training of {} model complete. Saved to {}.\".format(self.args[\"model_type\"], output_dir))\n\n def train(\n self,\n train_dataset,\n output_dir,\n multi_label=False,\n show_running_loss=True,\n eval_df=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. Not intended to be used directly.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n\n tb_writer = SummaryWriter(logdir=args[\"tensorboard_dir\"])\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args[\"train_batch_size\"])\n\n t_total = len(train_dataloader) // args[\"gradient_accumulation_steps\"] * args[\"num_train_epochs\"]\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args[\"weight_decay\"],\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n\n warmup_steps = math.ceil(t_total * args[\"warmup_ratio\"])\n args[\"warmup_steps\"] = warmup_steps if args[\"warmup_steps\"] == 0 else args[\"warmup_steps\"]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args[\"learning_rate\"], eps=args[\"adam_epsilon\"])\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args[\"warmup_steps\"], num_training_steps=t_total\n )\n\n if args[\"fp16\"]:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=args[\"fp16_opt_level\"])\n\n if args[\"n_gpu\"] > 1:\n model = torch.nn.DataParallel(model)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args[\"num_train_epochs\"]), desc=\"Epoch\", disable=args[\"silent\"], mininterval=0)\n epoch_number = 0\n best_eval_metric = None\n early_stopping_counter = 0\n steps_trained_in_current_epoch = 0\n epochs_trained = 0\n\n if args[\"model_name\"] and os.path.exists(args[\"model_name\"]):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args[\"model_name\"].split(\"/\")[-1].split(\"-\")\n if len(checkpoint_suffix) > 2:\n checkpoint_suffix = checkpoint_suffix[1]\n else:\n checkpoint_suffix = checkpoint_suffix[-1]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args[\"gradient_accumulation_steps\"])\n 
steps_trained_in_current_epoch = global_step % (\n len(train_dataloader) // args[\"gradient_accumulation_steps\"]\n )\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the current epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n if args[\"evaluate_during_training\"]:\n training_progress_scores = self._create_training_progress_scores(multi_label, **kwargs)\n\n if args[\"wandb_project\"]:\n wandb.init(project=args[\"wandb_project\"], config={**args}, **args[\"wandb_kwargs\"])\n wandb.watch(self.model)\n\n model.train()\n for _ in train_iterator:\n if epochs_trained > 0:\n epochs_trained -= 1\n continue\n # epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\")\n for step, batch in enumerate(tqdm(train_dataloader, desc=\"Current iteration\", disable=args[\"silent\"])):\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n batch = tuple(t.to(device) for t in batch)\n\n inputs = self._get_inputs_dict(batch)\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n\n if args[\"n_gpu\"] > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n current_loss = loss.item()\n\n if show_running_loss:\n print(\"\\rRunning loss: %f\" % loss, end=\"\")\n\n if args[\"gradient_accumulation_steps\"] > 1:\n loss = loss / args[\"gradient_accumulation_steps\"]\n\n if args[\"fp16\"]:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n # torch.nn.utils.clip_grad_norm_(\n # amp.master_params(optimizer), args[\"max_grad_norm\"]\n # )\n else:\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(\n # model.parameters(), args[\"max_grad_norm\"]\n # )\n\n tr_loss += loss.item()\n if (step + 1) % args[\"gradient_accumulation_steps\"] == 0:\n if args[\"fp16\"]:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args[\"max_grad_norm\"])\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args[\"max_grad_norm\"])\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args[\"logging_steps\"] > 0 and global_step % args[\"logging_steps\"] == 0:\n # Log metrics\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args[\"logging_steps\"], global_step)\n logging_loss = tr_loss\n if args[\"wandb_project\"]:\n wandb.log(\n {\n \"Training loss\": current_loss,\n \"lr\": scheduler.get_lr()[0],\n \"global_step\": global_step,\n }\n )\n\n if args[\"save_steps\"] > 0 and global_step % args[\"save_steps\"] == 0:\n # Save model checkpoint\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n self._save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args[\"evaluate_during_training\"] and (\n args[\"evaluate_during_training_steps\"] > 0\n and global_step % args[\"evaluate_during_training_steps\"] == 0\n ):\n # Only evaluate when single GPU otherwise metrics may not average well\n results, _, _ = self.eval_model(\n eval_df,\n verbose=verbose and args[\"evaluate_during_training_verbose\"],\n silent=True,\n **kwargs,\n )\n for key, value in results.items():\n 
tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n if args[\"save_eval_checkpoints\"]:\n self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args[\"output_dir\"], \"training_progress_scores.csv\"), index=False,\n )\n\n if args[\"wandb_project\"]:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(\n args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results\n )\n if best_eval_metric and args[\"early_stopping_metric_minimize\"]:\n if (\n results[args[\"early_stopping_metric\"]] - best_eval_metric\n < args[\"early_stopping_delta\"]\n ):\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(\n args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args[\"use_early_stopping\"]:\n if early_stopping_counter < args[\"early_stopping_patience\"]:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args['early_stopping_metric']}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args['early_stopping_patience']}\")\n else:\n if verbose:\n logger.info(\n f\" Patience of {args['early_stopping_patience']} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n else:\n if (\n results[args[\"early_stopping_metric\"]] - best_eval_metric\n > args[\"early_stopping_delta\"]\n ):\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(\n args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args[\"use_early_stopping\"]:\n if early_stopping_counter < args[\"early_stopping_patience\"]:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args['early_stopping_metric']}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args['early_stopping_patience']}\")\n else:\n if verbose:\n logger.info(\n f\" Patience of {args['early_stopping_patience']} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n\n epoch_number += 1\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}-epoch-{}\".format(global_step, epoch_number))\n\n if args[\"save_model_every_epoch\"] or args[\"evaluate_during_training\"]:\n os.makedirs(output_dir_current, exist_ok=True)\n\n if args[\"save_model_every_epoch\"]:\n self._save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args[\"evaluate_during_training\"]:\n results, _, _ = self.eval_model(\n eval_df, verbose=verbose and args[\"evaluate_during_training_verbose\"], silent=True, **kwargs\n )\n\n self._save_model(output_dir_current, optimizer, scheduler, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key 
in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(os.path.join(args[\"output_dir\"], \"training_progress_scores.csv\"), index=False)\n\n if args[\"wandb_project\"]:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results)\n if best_eval_metric and args[\"early_stopping_metric_minimize\"]:\n if results[args[\"early_stopping_metric\"]] - best_eval_metric < args[\"early_stopping_delta\"]:\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args[\"use_early_stopping\"] and args[\"early_stopping_consider_epochs\"]:\n if early_stopping_counter < args[\"early_stopping_patience\"]:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args['early_stopping_metric']}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args['early_stopping_patience']}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args['early_stopping_patience']} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n else:\n if results[args[\"early_stopping_metric\"]] - best_eval_metric > args[\"early_stopping_delta\"]:\n best_eval_metric = results[args[\"early_stopping_metric\"]]\n self._save_model(args[\"best_model_dir\"], optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args[\"use_early_stopping\"] and args[\"early_stopping_consider_epochs\"]:\n if early_stopping_counter < args[\"early_stopping_patience\"]:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args['early_stopping_metric']}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args['early_stopping_patience']}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args['early_stopping_patience']} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n\n return global_step, tr_loss / global_step\n\n def eval_model(self, eval_df, multi_label=False, output_dir=None, verbose=True, silent=False, **kwargs):\n \"\"\"\n Evaluates the model on eval_df. Saves results to output_dir.\n\n Args:\n eval_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,\n the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be evaluated on this Dataframe.\n output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n silent: If silent, tqdm progress bars will be hidden.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. 
The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results.\n model_outputs: List of model outputs for each row in eval_df\n wrong_preds: List of InputExample objects corresponding to each incorrect prediction by the model\n \"\"\" # noqa: ignore flake8\"\n\n if not output_dir:\n output_dir = self.args[\"output_dir\"]\n\n self._move_model_to_device()\n\n result, model_outputs, wrong_preds = self.evaluate(\n eval_df, output_dir, multi_label=multi_label, verbose=verbose, silent=silent, **kwargs\n )\n self.results.update(result)\n\n if verbose:\n logger.info(self.results)\n\n return result, model_outputs, wrong_preds\n\n def evaluate(self, eval_df, output_dir, multi_label=False, prefix=\"\", verbose=True, silent=False, **kwargs):\n \"\"\"\n Evaluates the model on eval_df.\n\n Utility function to be used by the eval_model() method. Not intended to be used directly.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n eval_output_dir = output_dir\n\n results = {}\n\n if \"text\" in eval_df.columns and \"labels\" in eval_df.columns:\n eval_examples = [\n InputExample(i, text, None, label)\n for i, (text, label) in enumerate(zip(eval_df[\"text\"], eval_df[\"labels\"]))\n ]\n elif \"text_a\" in eval_df.columns and \"text_b\" in eval_df.columns:\n eval_examples = [\n InputExample(i, text_a, text_b, label)\n for i, (text_a, text_b, label) in enumerate(\n zip(eval_df[\"text_a\"], eval_df[\"text_b\"], eval_df[\"labels\"])\n )\n ]\n else:\n warnings.warn(\n \"Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels.\"\n )\n eval_examples = [\n InputExample(i, text, None, label)\n for i, (text, label) in enumerate(zip(eval_df.iloc[:, 0], eval_df.iloc[:, 1]))\n ]\n\n if args[\"sliding_window\"]:\n eval_dataset, window_counts = self.load_and_cache_examples(\n eval_examples, evaluate=True, verbose=verbose, silent=silent\n )\n else:\n eval_dataset = self.load_and_cache_examples(eval_examples, evaluate=True, verbose=verbose, silent=silent)\n os.makedirs(eval_output_dir, exist_ok=True)\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args[\"eval_batch_size\"])\n\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n model.eval()\n\n for batch in tqdm(eval_dataloader, disable=args[\"silent\"] or silent):\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if multi_label:\n logits = logits.sigmoid()\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n\n if args[\"sliding_window\"]:\n count = 0\n window_ranges = []\n for n_windows in window_counts:\n window_ranges.append([count, count + n_windows])\n count += n_windows\n\n preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]\n out_label_ids = [\n out_label_ids[i] for i in range(len(out_label_ids)) if i in [window[0] for window in window_ranges]\n ]\n\n model_outputs = preds\n\n preds = [np.argmax(pred, 
axis=1) for pred in preds]\n final_preds = []\n for pred_row in preds:\n mode_pred, counts = mode(pred_row)\n if len(counts) > 1 and counts[0] == counts[1]:\n final_preds.append(args[\"tie_value\"])\n else:\n final_preds.append(mode_pred[0])\n preds = np.array(final_preds)\n elif not multi_label and args[\"regression\"] is True:\n preds = np.squeeze(preds)\n model_outputs = preds\n else:\n model_outputs = preds\n\n if not multi_label:\n preds = np.argmax(preds, axis=1)\n\n result, wrong = self.compute_metrics(preds, out_label_ids, eval_examples, **kwargs)\n result[\"eval_loss\"] = eval_loss\n results.update(result)\n\n output_eval_file = os.path.join(eval_output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(result.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(result[key])))\n\n return results, model_outputs, wrong\n\n def load_and_cache_examples(\n self, examples, evaluate=False, no_cache=False, multi_label=False, verbose=True, silent=False\n ):\n \"\"\"\n Converts a list of InputExample objects to a TensorDataset containing InputFeatures. Caches the InputFeatures.\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n \"\"\"\n\n process_count = self.args[\"process_count\"]\n\n tokenizer = self.tokenizer\n args = self.args\n\n if not no_cache:\n no_cache = args[\"no_cache\"]\n\n if not multi_label and args[\"regression\"]:\n output_mode = \"regression\"\n else:\n output_mode = \"classification\"\n\n os.makedirs(self.args[\"cache_dir\"], exist_ok=True)\n\n mode = \"dev\" if evaluate else \"train\"\n cached_features_file = os.path.join(\n args[\"cache_dir\"],\n \"cached_{}_{}_{}_{}_{}\".format(\n mode, args[\"model_type\"], args[\"max_seq_length\"], self.num_labels, len(examples),\n ),\n )\n\n if os.path.exists(cached_features_file) and (\n (not args[\"reprocess_input_data\"] and not no_cache)\n or (mode == \"dev\" and args[\"use_cached_eval_features\"] and not no_cache)\n ):\n features = torch.load(cached_features_file)\n if verbose:\n logger.info(f\" Features loaded from cache at {cached_features_file}\")\n else:\n if verbose:\n logger.info(f\" Converting to features started. Cache is not used.\")\n if args[\"sliding_window\"]:\n logger.info(\" Sliding window enabled\")\n features = convert_examples_to_features(\n examples,\n args[\"max_seq_length\"],\n tokenizer,\n output_mode,\n # XLNet has a CLS token at the end\n cls_token_at_end=bool(args[\"model_type\"] in [\"xlnet\"]),\n cls_token=tokenizer.cls_token,\n cls_token_segment_id=2 if args[\"model_type\"] in [\"xlnet\"] else 0,\n sep_token=tokenizer.sep_token,\n # RoBERTa uses an extra separator b/w pairs of sentences,\n # cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n sep_token_extra=bool(args[\"model_type\"] in [\"roberta\", \"camembert\", \"xlmroberta\"]),\n # PAD on the left for XLNet\n pad_on_left=bool(args[\"model_type\"] in [\"xlnet\"]),\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args[\"model_type\"] in [\"xlnet\"] else 0,\n process_count=process_count,\n multi_label=multi_label,\n silent=args[\"silent\"] or silent,\n use_multiprocessing=args[\"use_multiprocessing\"],\n sliding_window=args[\"sliding_window\"],\n flatten=not evaluate,\n stride=args[\"stride\"],\n )\n if verbose and args[\"sliding_window\"]:\n logger.info(f\" {len(features)} features created from {len(examples)} samples.\")\n\n if not no_cache:\n torch.save(features, cached_features_file)\n\n if args[\"sliding_window\"] and evaluate:\n window_counts = [len(sample) for sample in features]\n features = [feature for feature_set in features for feature in feature_set]\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n\n if output_mode == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n if args[\"sliding_window\"] and evaluate:\n return dataset, window_counts\n else:\n return dataset\n\n def compute_metrics(self, preds, labels, eval_examples, multi_label=False, **kwargs):\n \"\"\"\n Computes the evaluation metrics for the model predictions.\n\n Args:\n preds: Model predictions\n labels: Ground truth labels\n eval_examples: List of examples on which evaluation was performed\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results. 
(Matthews correlation coefficient, tp, tn, fp, fn)\n wrong: List of InputExample objects corresponding to each incorrect prediction by the model\n \"\"\" # noqa: ignore flake8\"\n\n assert len(preds) == len(labels)\n\n extra_metrics = {}\n for metric, func in kwargs.items():\n extra_metrics[metric] = func(labels, preds)\n\n mismatched = labels != preds\n\n wrong = [i for (i, v) in zip(eval_examples, mismatched) if v.any()]\n\n if multi_label:\n label_ranking_score = label_ranking_average_precision_score(labels, preds)\n return {**{\"LRAP\": label_ranking_score}, **extra_metrics}, wrong\n elif self.args[\"regression\"]:\n return {**extra_metrics}, wrong\n\n mcc = matthews_corrcoef(labels, preds)\n\n if self.model.num_labels == 2:\n tn, fp, fn, tp = confusion_matrix(labels, preds, labels=[0, 1]).ravel()\n return (\n {**{\"mcc\": mcc, \"tp\": tp, \"tn\": tn, \"fp\": fp, \"fn\": fn}, **extra_metrics},\n wrong,\n )\n else:\n return {**{\"mcc\": mcc}, **extra_metrics}, wrong\n\n def predict(self, to_predict, multi_label=False):\n \"\"\"\n Performs predictions on a list of text.\n\n Args:\n to_predict: A python list of text (str) to be sent to the model for prediction.\n\n Returns:\n preds: A python list of the predictions (0 or 1) for each text.\n model_outputs: A python list of the raw model outputs for each text.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n\n self._move_model_to_device()\n\n if multi_label:\n eval_examples = [\n InputExample(i, text, None, [0 for i in range(self.num_labels)]) for i, text in enumerate(to_predict)\n ]\n else:\n if isinstance(to_predict[0], list):\n eval_examples = [InputExample(i, text[0], text[1], 0) for i, text in enumerate(to_predict)]\n else:\n eval_examples = [InputExample(i, text, None, 0) for i, text in enumerate(to_predict)]\n if args[\"sliding_window\"]:\n eval_dataset, window_counts = self.load_and_cache_examples(eval_examples, evaluate=True, no_cache=True)\n else:\n eval_dataset = self.load_and_cache_examples(\n eval_examples, evaluate=True, multi_label=multi_label, no_cache=True\n )\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args[\"eval_batch_size\"])\n\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n\n if self.config.output_hidden_states:\n for batch in tqdm(eval_dataloader, disable=args[\"silent\"]):\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n embedding_outputs, layer_hidden_states = outputs[2][0], outputs[2][1:]\n\n if multi_label:\n logits = logits.sigmoid()\n\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n all_layer_hidden_states = [state.detach().cpu().numpy() for state in layer_hidden_states]\n all_embedding_outputs = embedding_outputs.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n all_layer_hidden_states = np.append(\n [state.detach().cpu().numpy() for state in layer_hidden_states], axis=0\n )\n all_embedding_outputs = np.append(embedding_outputs.detach().cpu().numpy(), axis=0)\n else:\n for batch in tqdm(eval_dataloader, disable=args[\"silent\"]):\n model.eval()\n 
batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if multi_label:\n logits = logits.sigmoid()\n\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n\n if args[\"sliding_window\"]:\n count = 0\n window_ranges = []\n for n_windows in window_counts:\n window_ranges.append([count, count + n_windows])\n count += n_windows\n\n preds = [preds[window_range[0] : window_range[1]] for window_range in window_ranges]\n\n model_outputs = preds\n\n preds = [np.argmax(pred, axis=1) for pred in preds]\n final_preds = []\n for pred_row in preds:\n mode_pred, counts = mode(pred_row)\n if len(counts) > 1 and counts[0] == counts[1]:\n final_preds.append(args[\"tie_value\"])\n else:\n final_preds.append(mode_pred[0])\n preds = np.array(final_preds)\n elif not multi_label and args[\"regression\"] is True:\n preds = np.squeeze(preds)\n model_outputs = preds\n else:\n model_outputs = preds\n if multi_label:\n if isinstance(args[\"threshold\"], list):\n threshold_values = args[\"threshold\"]\n preds = [\n [self._threshold(pred, threshold_values[i]) for i, pred in enumerate(example)]\n for example in preds\n ]\n else:\n preds = [[self._threshold(pred, args[\"threshold\"]) for pred in example] for example in preds]\n else:\n preds = np.argmax(preds, axis=1)\n\n if self.config.output_hidden_states:\n return preds, model_outputs, all_embedding_outputs, all_layer_hidden_states\n else:\n return preds, model_outputs\n\n def _threshold(self, x, threshold):\n if x >= threshold:\n return 1\n return 0\n\n def _move_model_to_device(self):\n self.model.to(self.device)\n\n def _get_inputs_dict(self, batch):\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n\n # XLM, DistilBERT and RoBERTa don't use segment_ids\n if self.args[\"model_type\"] != \"distilbert\":\n inputs[\"token_type_ids\"] = batch[2] if self.args[\"model_type\"] in [\"bert\", \"xlnet\", \"albert\"] else None\n\n return inputs\n\n def _get_last_metrics(self, metric_values):\n return {metric: values[-1] for metric, values in metric_values.items()}\n\n def _create_training_progress_scores(self, multi_label, **kwargs):\n extra_metrics = {key: [] for key in kwargs}\n if multi_label:\n training_progress_scores = {\n \"global_step\": [],\n \"LRAP\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n else:\n if self.model.num_labels == 2:\n training_progress_scores = {\n \"global_step\": [],\n \"tp\": [],\n \"tn\": [],\n \"fp\": [],\n \"fn\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n elif self.model.num_labels == 1:\n training_progress_scores = {\n \"global_step\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n else:\n training_progress_scores = {\n \"global_step\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n\n return training_progress_scores\n\n def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):\n if not output_dir:\n output_dir = self.args[\"output_dir\"]\n os.makedirs(output_dir, 
exist_ok=True)\n\n if model and not self.args[\"no_save\"]:\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir)\n self.tokenizer.save_pretrained(output_dir)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n if optimizer and scheduler:\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n self._save_model_args(output_dir)\n\n if results:\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n def _save_model_args(self, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n with open(os.path.join(output_dir, \"model_args.json\"), \"w\") as f:\n json.dump(self.args, f)\n\n def _load_model_args(self, input_dir):\n model_args_file = os.path.join(input_dir, \"model_args.json\")\n if os.path.isfile(model_args_file):\n with open(model_args_file, \"r\") as f:\n model_args = json.load(f)\n return model_args\n" ]
[ [ "scipy.stats.mode", "sklearn.metrics.confusion_matrix", "torch.utils.data.RandomSampler", "torch.cuda.is_available", "torch.load", "torch.nn.DataParallel", "pandas.DataFrame", "torch.manual_seed", "torch.tensor", "torch.utils.data.DataLoader", "numpy.argmax", "torch.Tensor", "torch.device", "numpy.array", "torch.cuda.manual_seed_all", "sklearn.metrics.matthews_corrcoef", "torch.save", "torch.utils.data.SequentialSampler", "numpy.squeeze", "torch.utils.data.TensorDataset", "numpy.random.seed", "torch.no_grad", "sklearn.metrics.label_ranking_average_precision_score" ] ]
NarendraPatwardhan/gym_venv
[ "9c7456cc64d416556f1d1d8eca7a72df0821cf00" ]
[ "model.py" ]
[ "import numpy as np\nimport mxnet as mx\nimport matplotlib.pyplot as plt\n\n#-----------------------------------------------------------------------------\n\nclass StateModel(mx.gluon.Block):\n def __init__(self,config):\n super(StateModel, self).__init__()\n self.config = config\n x = mx.nd.array(self.config['S0A'])\n y = mx.nd.array(self.config['S1'])\n self.dataset = mx.gluon.data.dataset.ArrayDataset(x,y)\n self.dataloader = mx.gluon.data.DataLoader(self.dataset,batch_size=self.config['batch_size'])\n with self.name_scope():\n self.state_transition = mx.gluon.nn.Sequential('state_transition_')\n with self.state_transition.name_scope():\n self.state_transition.add(mx.gluon.nn.Dense(10, activation='relu'))\n self.state_transition.add(mx.gluon.nn.Dense(20, activation='relu'))\n self.state_transition.add(mx.gluon.nn.Dense(10, activation='relu'))\n self.state_transition.add(mx.gluon.nn.Dense(self.config['S1'].shape[1]))\n\n def forward(self, x):\n return self.state_transition(x)\n\n def fit(self):\n self.collect_params().initialize(mx.init.Xavier(), ctx=mx.cpu())\n criterion = mx.gluon.loss.HuberLoss()\n optimizer = mx.gluon.Trainer(self.collect_params(), 'adam',{'learning_rate': self.config['learning_rate'],'wd': self.config['weight_decay']})\n errors = []\n for epoch in range(self.config['max_epochs']):\n running_loss = 0.0\n n_total = 0.0\n for data in self.dataloader:\n x, y = data\n with mx.autograd.record():\n output = self.forward(x)\n loss = criterion(output, y)\n loss.backward()\n optimizer.step(self.config['batch_size'])\n running_loss += mx.nd.sum(loss).asscalar()\n n_total += x.shape[0]\n errors.append(running_loss / n_total)\n if epoch%self.config['verbosity']==0:\n print('epoch [{}/{}], loss:{:.4f}'\n .format(epoch + 1, self.config['max_epochs'], running_loss / n_total))\n fig,ax = plt.subplots()\n ax.plot(range(len(errors)),np.array(errors))\n ax.set_title('State Modelling')\n ax.set_ylabel('Huber Loss')\n ax.set_xlabel('Epoch')\n fig.savefig('state_modelling')\n\n#-----------------------------------------------------------------------------\n\nclass RewardModel(mx.gluon.Block):\n def __init__(self,config):\n super(RewardModel, self).__init__()\n self.config = config\n x = mx.nd.array(self.config['S0AS1'])\n y = mx.nd.array(self.config['R'])\n self.dataset = mx.gluon.data.dataset.ArrayDataset(x,y)\n self.dataloader = mx.gluon.data.DataLoader(self.dataset,batch_size=self.config['batch_size'])\n with self.name_scope():\n self.reward_function = mx.gluon.nn.Sequential('reward_function_')\n with self.reward_function.name_scope():\n self.reward_function.add(mx.gluon.nn.Dense(10, activation='relu'))\n self.reward_function.add(mx.gluon.nn.Dense(20, activation='relu'))\n self.reward_function.add(mx.gluon.nn.Dense(10, activation='relu'))\n self.reward_function.add(mx.gluon.nn.Dense(1))\n\n def forward(self, x):\n return self.reward_function(x)\n\n def fit(self):\n self.collect_params().initialize(mx.init.Xavier(), ctx=mx.cpu())\n criterion = mx.gluon.loss.HuberLoss()\n optimizer = mx.gluon.Trainer(self.collect_params(), 'adam',{'learning_rate': self.config['learning_rate'],'wd': self.config['weight_decay']})\n errors = []\n for epoch in range(self.config['max_epochs']):\n running_loss = 0.0\n n_total = 0.0\n for data in self.dataloader:\n x, y = data\n with mx.autograd.record():\n output = self.forward(x)\n loss = criterion(output, y)\n loss.backward()\n optimizer.step(self.config['batch_size'])\n running_loss += mx.nd.sum(loss).asscalar()\n n_total += x.shape[0]\n 
errors.append(running_loss / n_total)\n if epoch%self.config['verbosity']==0:\n print('epoch [{}/{}], loss:{:.4f}'\n .format(epoch + 1, self.config['max_epochs'], running_loss / n_total))\n fig,ax = plt.subplots()\n ax.plot(range(len(errors)),np.array(errors))\n ax.set_title('Reward Modelling')\n ax.set_ylabel('Huber Loss')\n ax.set_xlabel('Epoch')\n fig.savefig('reward_modelling')\n\n#-----------------------------------------------------------------------------\n\nif __name__ == '__main__':\n x = np.random.randn(100,4)\n xt = np.random.randn(100,4)\n y = x[:,:3]\n yt = xt[:,:3]\n random_config = {\n 'max_epochs': 5000,\n 'batch_size': 64,\n 'learning_rate': 1e-3,\n 'weight_decay': 1e-5,\n 'verbosity': 25,\n 'S0A': x,\n 'S1': y\n }\n random_sm = StateModel(random_config)\n random_sm.fit()\n yp = random_sm(mx.nd.array(xt))\n print(abs(yp.asnumpy() - yt).sum())\n\n" ]
[ [ "numpy.array", "numpy.random.randn", "matplotlib.pyplot.subplots" ] ]
AlexanderDokuchaev/mmsegmentation
[ "0c443ee370cce6227661b802184072174c4e3f64" ]
[ "mmseg/apis/ote/apis/segmentation/openvino_task.py" ]
[ "# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport logging\nimport json\nimport os\nimport tempfile\nfrom addict import Dict as ADDict\nfrom typing import Any, Dict, Tuple, List, Optional, Union\n\nimport cv2\nimport numpy as np\n\nfrom ote_sdk.utils.segmentation_utils import (create_hard_prediction_from_soft_prediction,\n create_annotation_from_segmentation_map)\nfrom ote_sdk.entities.datasets import DatasetEntity\nfrom ote_sdk.entities.annotation import AnnotationSceneEntity, AnnotationSceneKind\nfrom ote_sdk.entities.inference_parameters import InferenceParameters, default_progress_callback\nfrom ote_sdk.entities.label import LabelEntity\nfrom ote_sdk.entities.model import (\n ModelStatus,\n ModelEntity,\n ModelFormat,\n OptimizationMethod,\n ModelPrecision,\n)\nfrom ote_sdk.entities.optimization_parameters import OptimizationParameters\nfrom ote_sdk.entities.resultset import ResultSetEntity\nfrom ote_sdk.entities.task_environment import TaskEnvironment\nfrom ote_sdk.usecases.evaluation.metrics_helper import MetricsHelper\nfrom ote_sdk.usecases.exportable_code.inference import BaseOpenVINOInferencer\nfrom ote_sdk.usecases.tasks.interfaces.evaluate_interface import IEvaluationTask\nfrom ote_sdk.usecases.tasks.interfaces.inference_interface import IInferenceTask\nfrom ote_sdk.usecases.tasks.interfaces.optimization_interface import IOptimizationTask, OptimizationType\n\nfrom compression.api import DataLoader\nfrom compression.engines.ie_engine import IEEngine\nfrom compression.graph import load_model, save_model\nfrom compression.graph.model_utils import compress_model_weights, get_nodes_by_type\nfrom compression.pipeline.initializer import create_pipeline\nfrom ote_sdk.serialization.label_mapper import label_schema_to_bytes\n\nfrom .configuration import OTESegmentationConfig\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_output(net, outputs, name):\n try:\n key = net.get_ov_name_for_tensor(name)\n assert key in outputs, f'\"{key}\" is not a valid output identifier'\n except KeyError:\n if name not in outputs:\n raise KeyError(f'Failed to identify output \"{name}\"')\n key = name\n\n return outputs[key]\n\n\nclass OpenVINOSegmentationInferencer(BaseOpenVINOInferencer):\n def __init__(\n self,\n hparams: OTESegmentationConfig,\n labels: List[LabelEntity],\n model_file: Union[str, bytes],\n weight_file: Union[str, bytes, None] = None,\n device: str = \"CPU\",\n num_requests: int = 1,\n ):\n \"\"\"\n Inferencer implementation for OTESegmentation using OpenVINO backend.\n\n :param hparams: Hyper parameters that the model should use.\n :param model_file: Path to model to load, `.xml`, `.bin` or `.onnx` file.\n :param device: Device to run inference on, such as CPU, GPU or MYRIAD. Defaults to \"CPU\".\n :param num_requests: Maximum number of requests that the inferencer can make.\n Good value is the number of available cores. 
Defaults to 1.\n \"\"\"\n\n super().__init__(model_file, weight_file, device, num_requests)\n\n self.labels = labels\n self.input_blob_name = 'input'\n self.n, self.c, self.h, self.w = self.net.input_info[self.input_blob_name].tensor_desc.dims\n self.keep_aspect_ratio_resize = False\n self.pad_value = 0\n self.soft_threshold = float(hparams.postprocessing.soft_threshold)\n self.blur_strength = int(hparams.postprocessing.blur_strength)\n\n @staticmethod\n def resize_image(image: np.ndarray, size: Tuple[int], keep_aspect_ratio: bool = False) -> np.ndarray:\n if not keep_aspect_ratio:\n resized_frame = cv2.resize(image, size)\n else:\n h, w = image.shape[:2]\n scale = min(size[1] / h, size[0] / w)\n resized_frame = cv2.resize(image, None, fx=scale, fy=scale)\n return resized_frame\n\n def pre_process(self, image: np.ndarray) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:\n resized_image = self.resize_image(image, (self.w, self.h), self.keep_aspect_ratio_resize)\n meta = {'original_shape': image.shape,\n 'resized_shape': resized_image.shape}\n\n h, w = resized_image.shape[:2]\n if h != self.h or w != self.w:\n resized_image = np.pad(resized_image,\n ((0, self.h - h), (0, self.w - w), (0, 0)),\n mode='constant',\n constant_values=self.pad_value)\n\n resized_image = resized_image.transpose((2, 0, 1)) # Change data layout from HWC to CHW\n resized_image = resized_image.reshape((self.n, self.c, self.h, self.w))\n dict_inputs = {self.input_blob_name: resized_image}\n\n return dict_inputs, meta\n\n def post_process(self, prediction: Dict[str, np.ndarray], metadata: Dict[str, Any]) -> AnnotationSceneEntity:\n pred_class_maps = prediction['output']\n assert pred_class_maps.shape[0] == 1\n pred_class_map = pred_class_maps[0]\n\n soft_prediction = np.transpose(pred_class_map, axes=(1, 2, 0))\n\n hard_prediction = create_hard_prediction_from_soft_prediction(\n soft_prediction=soft_prediction,\n soft_threshold=self.soft_threshold,\n blur_strength=self.blur_strength\n )\n\n label_dictionary = {i + 1: self.labels[i] for i in range(len(self.labels))}\n annotations = create_annotation_from_segmentation_map(\n hard_prediction=hard_prediction,\n soft_prediction=soft_prediction,\n label_map=label_dictionary\n )\n\n return AnnotationSceneEntity(\n kind=AnnotationSceneKind.PREDICTION,\n annotations=annotations\n )\n\n def forward(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n return self.model.infer(inputs)\n\n\nclass OTEOpenVinoDataLoader(DataLoader):\n def __init__(self, dataset: DatasetEntity, inferencer: BaseOpenVINOInferencer):\n self.dataset = dataset\n self.inferencer = inferencer\n\n def __getitem__(self, index):\n image = self.dataset[index].numpy\n annotation = self.dataset[index].annotation_scene\n inputs, metadata = self.inferencer.pre_process(image)\n\n return (index, annotation), inputs, metadata\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass OpenVINOSegmentationTask(IInferenceTask, IEvaluationTask, IOptimizationTask):\n def __init__(self,\n task_environment: TaskEnvironment):\n self.task_environment = task_environment\n self.model = self.task_environment.model\n self.inferencer = self.load_inferencer()\n\n template_file_path = task_environment.model_template.model_template_path\n self._base_dir = os.path.abspath(os.path.dirname(template_file_path))\n\n @property\n def hparams(self):\n return self.task_environment.get_hyper_parameters(OTESegmentationConfig)\n\n def load_inferencer(self) -> OpenVINOSegmentationInferencer:\n labels = 
self.task_environment.label_schema.get_labels(include_empty=False)\n return OpenVINOSegmentationInferencer(self.hparams,\n labels,\n self.model.get_data(\"openvino.xml\"),\n self.model.get_data(\"openvino.bin\"))\n\n def infer(self,\n dataset: DatasetEntity,\n inference_parameters: Optional[InferenceParameters] = None) -> DatasetEntity:\n update_progress_callback = default_progress_callback\n if inference_parameters is not None:\n update_progress_callback = inference_parameters.update_progress\n dataset_size = len(dataset)\n for i, dataset_item in enumerate(dataset, 1):\n predicted_scene = self.inferencer.predict(dataset_item.numpy)\n dataset_item.append_annotations(predicted_scene.annotations)\n update_progress_callback(int(i / dataset_size * 100))\n return dataset\n\n def evaluate(self,\n output_result_set: ResultSetEntity,\n evaluation_metric: Optional[str] = None):\n logger.info('Computing mDice')\n metrics = MetricsHelper.compute_dice_averaged_over_pixels(\n output_result_set\n )\n logger.info(f\"mDice after evaluation: {metrics.overall_dice.value}\")\n\n output_result_set.performance = metrics.get_performance()\n\n def optimize(self,\n optimization_type: OptimizationType,\n dataset: DatasetEntity,\n output_model: ModelEntity,\n optimization_parameters: Optional[OptimizationParameters]):\n\n if optimization_type is not OptimizationType.POT:\n raise ValueError(\"POT is the only supported optimization type for OpenVino models\")\n\n data_loader = OTEOpenVinoDataLoader(dataset, self.inferencer)\n\n with tempfile.TemporaryDirectory() as tempdir:\n xml_path = os.path.join(tempdir, \"model.xml\")\n bin_path = os.path.join(tempdir, \"model.bin\")\n with open(xml_path, \"wb\") as f:\n f.write(self.model.get_data(\"openvino.xml\"))\n with open(bin_path, \"wb\") as f:\n f.write(self.model.get_data(\"openvino.bin\"))\n\n model_config = ADDict({\n 'model_name': 'openvino_model',\n 'model': xml_path,\n 'weights': bin_path\n })\n\n model = load_model(model_config)\n\n if get_nodes_by_type(model, ['FakeQuantize']):\n logger.warning(\"Model is already optimized by POT\")\n output_model.model_status = ModelStatus.FAILED\n return\n\n engine_config = ADDict({\n 'device': 'CPU'\n })\n\n optimization_config_path = os.path.join(self._base_dir, 'pot_optimization_config.json')\n if os.path.exists(optimization_config_path):\n with open(optimization_config_path) as f_src:\n algorithms = ADDict(json.load(f_src))['algorithms']\n else:\n algorithms = [\n ADDict({\n 'name': 'DefaultQuantization',\n 'params': {\n 'target_device': 'ANY'\n }\n })\n ]\n for algo in algorithms:\n algo.params.stat_subset_size = self.hparams.pot_parameters.stat_subset_size\n algo.params.shuffle_data = True\n if 'Quantization' in algo['name']:\n algo.params.preset = self.hparams.pot_parameters.preset.name.lower()\n\n engine = IEEngine(config=engine_config, data_loader=data_loader, metric=None)\n\n pipeline = create_pipeline(algorithms, engine)\n\n compressed_model = pipeline.run(model)\n\n compress_model_weights(compressed_model)\n\n with tempfile.TemporaryDirectory() as tempdir:\n save_model(compressed_model, tempdir, model_name=\"model\")\n with open(os.path.join(tempdir, \"model.xml\"), \"rb\") as f:\n output_model.set_data(\"openvino.xml\", f.read())\n with open(os.path.join(tempdir, \"model.bin\"), \"rb\") as f:\n output_model.set_data(\"openvino.bin\", f.read())\n \n output_model.set_data(\"label_schema.json\", label_schema_to_bytes(self.task_environment.label_schema))\n\n # set model attributes for quantized model\n 
output_model.model_status = ModelStatus.SUCCESS\n output_model.model_format = ModelFormat.OPENVINO\n output_model.optimization_type = OptimizationType.POT\n output_model.optimization_methods = [OptimizationMethod.QUANTIZATION]\n output_model.precision = [ModelPrecision.INT8]\n\n self.model = output_model\n self.inferencer = self.load_inferencer()\n" ]
[ [ "numpy.pad", "numpy.transpose" ] ]
USGS-WiM/Gage-Cam-Sensor-AI
[ "6e38517cbf90a82b6f679b8eee289cfdc12dd1b1" ]
[ "sensor_AI/run_lite.py" ]
[ "from tensorflow import keras\nimport numpy as np\nimport pidash\nimport os\n#import gc\n\nPATH = os.path.dirname(__file__)\n\n# This is a prototype implementation of the sensor AI deployment. \n#This is not final code and should not be reguarded as a best practices.\n\n\n\n# get_exposed() is a simple pixel count routine. It established the pixel count on the x and the y axis using simple n^2 logic loops\n\ndef get_exposed(y_hat):\n\timg = y_hat.ravel()\n\timg = img[2::3]\n\timg = np.resize(img, (256, 256))\n\th = []\n\tfor i, obj in enumerate(img):\n\t\tfor j in obj:\n\t\t\tif j:\n\t\t\t\th.append(i)\n\t\t\t\tbreak\n\tw=[]\n\tfor i, obj in enumerate(img.T):\n\t\tfor j in obj:\t\t\t\t\n\t\t\tif j:\n\t\t\t\tw.append(i)\n\t\t\t\tbreak\n\th = len(h)\n\tw = len(w)\n\t\n\treturn h, w\n\n\ndef execute(): #on_dek, meta, id):\n\t#gc.collect()\n\t#Load keras pretrained model from .h5 file\n\tmodel = keras.models.load_model(PATH + \"/model/UnetM-relu_output.h5\") \n\t# summarize model \n\tmodel.summary()\n\tpidash.dashboard()\n\t#get px height and px width from image\n\tpxH, pxW = run_on_dek(model)\n\toutputtxt = 'Height: '+ str(pxH) + ' px '+ ' H(p): ' + str((3.36 - (pxH/pxW) * .333)) + ' width: '+ str(pxW) + ' px'\n\ttext_file = open(\"complete.txt\", \"w\") \n\tn = text_file.write(outputtxt) \n\ttext_file.close()\n\tprint (outputtxt)\n\n\ndef run_on_dek(model):\n\t# Load img\n\timg = np.load(PATH + \"/on_dek/rdy.npy\")\n\tprint(\"Image loaded...\" + '\\n\\n' + \"Running model...\")\n\tpidash.dashboard()\n\tresult = model.predict(img)\n\tprint(\"\\n\\nModel ran successfully...\")\n\tresult = result >=.995\n\t#print (result)\n\tpx, w = get_exposed(result)\n\treturn px, w\n\n#execute()\n" ]
[ [ "numpy.resize", "tensorflow.keras.models.load_model", "numpy.load" ] ]
asplos2020/DRTest
[ "85c3c9b2a46cafa7184130f2596c5f9eb3b20bff" ]
[ "attack_metrics/rgb.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.platform import flags\n\nsys.path.append(\"../\")\n\nfrom nmutant_model.model_operation import model_load\nfrom nmutant_util.utils_file import get_data_file, get_data_file_with_Gaussian\nfrom nmutant_util.utils_imgproc import preprocess_image_1\n\nFLAGS = flags.FLAGS\n\nua = [\"fgsm\", \"blackbox\"]\nta = [\"jsma\", 'cw']\n\ndef rgb(datasets, model, samples_path, radius, epoch=49):\n \"\"\"\n :param datasets\n :param model\n :param samples_path\n :return:\n \"\"\"\n # Object used to keep track of (and return) key accuracies\n tf.reset_default_graph()\n sess, preds, x, y, model, feed_dict = model_load(datasets, model, epoch=epoch)\n\n [image_list, image_files, real_labels, predicted_labels] = get_data_file_with_Gaussian(datasets, samples_path, radius)\n\n samples = np.asarray([preprocess_image_1(image.astype('float64')) for image in image_list])\n\n RGB_UA=0\n n_batches = int(np.ceil(samples.shape[0] / 256))\n for i in range(n_batches):\n start = i * 256\n end = np.minimum(len(samples), (i + 1) * 256)\n feed = {x: samples[start:end]}\n if feed_dict is not None:\n feed.update(feed_dict)\n probabilities = sess.run(preds, feed)\n #print(probabilities[1])\n for j in range(len(probabilities)): \n if np.argmax(probabilities[j])!=real_labels[start+j]:\n RGB_UA+=1\n\n result = RGB_UA / len(image_list)\n print('Robustness to Gaussian Blur %.4f' %(result))\n\n # Close TF session\n sess.close()\n\n return result\n\n\ndef main(argv=None):\n rgb(datasets = FLAGS.datasets,\n model=FLAGS.model,\n samples_path=FLAGS.samples,\n radius=FLAGS.radius)\n\nif __name__ == '__main__':\n flags.DEFINE_string('datasets', 'mnist', 'The target datasets.')\n flags.DEFINE_string('model', 'lenet1', 'The name of model')\n flags.DEFINE_string('samples', '../adv_result/mnist/lenet1_fgsm_test', 'The path to load samples.')\n flags.DEFINE_string('radius', '1', 'The Gaussion radius.')\n tf.app.run()\n" ]
[ [ "numpy.ceil", "tensorflow.reset_default_graph", "tensorflow.python.platform.flags.DEFINE_string", "numpy.argmax", "tensorflow.app.run" ] ]
Sinha-Raunak/gan-toolkit
[ "6d2d86833bb00833b2d9cd11a1a83476f44b65fd" ]
[ "agant/models/pytorch/loss/NLL.py" ]
[ "import torch\nimport numpy as np\nfrom torch.autograd import Variable\n\nclass loss_block:\n def __init__(self):\n super(loss_block, self).__init__()\n self.criterion = torch.nn.NLLLoss(size_average=False)\n cuda = True if torch.cuda.is_available() else False\n if cuda:\n self.criterion.cuda()\n def loss(self,input_vals,lables):\n return self.criterion(input_vals,lables)" ]
[ [ "torch.nn.NLLLoss", "torch.cuda.is_available" ] ]
jake-is-ESD-protected/scipy
[ "d7283ff75c218c300f372b5fdd960b987c1709a1", "d7283ff75c218c300f372b5fdd960b987c1709a1", "d7283ff75c218c300f372b5fdd960b987c1709a1", "d7283ff75c218c300f372b5fdd960b987c1709a1" ]
[ "doc/source/tutorial/examples/optimize_global_1.py", "scipy/special/utils/makenpz.py", "scipy/linalg/interpolative.py", "scipy/stats/_crosstab.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize\n\n\ndef eggholder(x):\n return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))\n -x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))\n\nbounds = [(-512, 512), (-512, 512)]\n\nx = np.arange(-512, 513)\ny = np.arange(-512, 513)\nxgrid, ygrid = np.meshgrid(x, y)\nxy = np.stack([xgrid, ygrid])\n\nresults = dict()\nresults['shgo'] = optimize.shgo(eggholder, bounds)\nresults['DA'] = optimize.dual_annealing(eggholder, bounds)\nresults['DE'] = optimize.differential_evolution(eggholder, bounds)\nresults['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=256, iters=5,\n sampling_method='sobol')\n\nfig = plt.figure(figsize=(4.5, 4.5))\nax = fig.add_subplot(111)\nim = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',\n cmap='gray')\nax.set_xlabel('x')\nax.set_ylabel('y')\n\ndef plot_point(res, marker='o', color=None):\n ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)\n\nplot_point(results['DE'], color='c') # differential_evolution - cyan\nplot_point(results['DA'], color='w') # dual_annealing. - white\n\n# SHGO produces multiple minima, plot them all (with a smaller marker size)\nplot_point(results['shgo'], color='r', marker='+')\nplot_point(results['shgo_sobol'], color='r', marker='x')\nfor i in range(results['shgo_sobol'].xl.shape[0]):\n ax.plot(512 + results['shgo_sobol'].xl[i, 0],\n 512 + results['shgo_sobol'].xl[i, 1],\n 'ro', ms=2)\n\nax.set_xlim([-4, 514*2])\nax.set_ylim([-4, 514*2])\n\nfig.tight_layout()\nplt.show()\n\n\n", "\"\"\"\npython makenpz.py DIRECTORY\n\nBuild a npz containing all data files in the directory.\n\n\"\"\"\n\nimport os\nimport numpy as np\nimport argparse\nfrom stat import ST_MTIME\n\n\ndef newer(source, target):\n \"\"\"\n Return true if 'source' exists and is more recently modified than\n 'target', or if 'source' exists and 'target' doesn't. 
Return false if\n both exist and 'target' is the same age or younger than 'source'.\n \"\"\"\n if not os.path.exists(source):\n raise ValueError(\"file '%s' does not exist\" % os.path.abspath(source))\n if not os.path.exists(target):\n return 1\n\n mtime1 = os.stat(source)[ST_MTIME]\n mtime2 = os.stat(target)[ST_MTIME]\n\n return mtime1 > mtime2\n\n\ndef main():\n p = argparse.ArgumentParser(usage=(__doc__ or '').strip())\n p.add_argument('--use-timestamp', action='store_true', default=False,\n help=\"don't rewrite npz file if it is newer than sources\")\n p.add_argument('dirname') # for Meson: 'boost' or 'gsl'\n p.add_argument(\"-o\", \"--outdir\", type=str,\n help=\"Relative path to the output directory\")\n args = p.parse_args()\n\n if not args.outdir:\n # We're dealing with a distutils build here, write in-place:\n inp = os.path.normpath(args.dirname)\n outp = inp + \".npz\"\n else:\n inp = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n '..', 'tests', 'data', args.dirname)\n outdir_abs = os.path.join(os.getcwd(), args.outdir)\n outp = os.path.join(outdir_abs, args.dirname + \".npz\")\n\n # Skip rebuilding if no sources\n if os.path.isfile(outp) and not os.path.isdir(inp):\n return\n\n # Find source files\n files = []\n for dirpath, dirnames, filenames in os.walk(inp):\n for fn in filenames:\n if fn.endswith('.txt'):\n key = dirpath[len(inp)+1:] + '-' + fn[:-4]\n key = key.strip('-')\n files.append((key, os.path.join(dirpath, fn)))\n\n # Check if changes required\n if args.use_timestamp and os.path.isfile(outp):\n try:\n old_data = np.load(outp)\n try:\n changed = set(old_data.keys()) != set(key for key, _ in files)\n finally:\n old_data.close()\n except OSError:\n # corrupted file\n changed = True\n\n changed = changed or any(newer(fn, outp) for key, fn in files)\n changed = changed or newer(__file__, outp)\n if not changed:\n return\n\n data = {}\n for key, fn in files:\n data[key] = np.loadtxt(fn)\n\n np.savez_compressed(outp, **data)\n\n\nif __name__ == \"__main__\":\n main()\n", "#******************************************************************************\n# Copyright (C) 2013 Kenneth L. Ho\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer. Redistributions in binary\n# form must reproduce the above copyright notice, this list of conditions and\n# the following disclaimer in the documentation and/or other materials\n# provided with the distribution.\n#\n# None of the names of the copyright holders may be used to endorse or\n# promote products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#******************************************************************************\n\n# Python module for interfacing with `id_dist`.\n\nr\"\"\"\n======================================================================\nInterpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)\n======================================================================\n\n.. moduleauthor:: Kenneth L. Ho <[email protected]>\n\n.. versionadded:: 0.13\n\n.. currentmodule:: scipy.linalg.interpolative\n\nAn interpolative decomposition (ID) of a matrix :math:`A \\in\n\\mathbb{C}^{m \\times n}` of rank :math:`k \\leq \\min \\{ m, n \\}` is a\nfactorization\n\n.. math::\n A \\Pi =\n \\begin{bmatrix}\n A \\Pi_{1} & A \\Pi_{2}\n \\end{bmatrix} =\n A \\Pi_{1}\n \\begin{bmatrix}\n I & T\n \\end{bmatrix},\n\nwhere :math:`\\Pi = [\\Pi_{1}, \\Pi_{2}]` is a permutation matrix with\n:math:`\\Pi_{1} \\in \\{ 0, 1 \\}^{n \\times k}`, i.e., :math:`A \\Pi_{2} =\nA \\Pi_{1} T`. This can equivalently be written as :math:`A = BP`,\nwhere :math:`B = A \\Pi_{1}` and :math:`P = [I, T] \\Pi^{\\mathsf{T}}`\nare the *skeleton* and *interpolation matrices*, respectively.\n\nIf :math:`A` does not have exact rank :math:`k`, then there exists an\napproximation in the form of an ID such that :math:`A = BP + E`, where\n:math:`\\| E \\| \\sim \\sigma_{k + 1}` is on the order of the :math:`(k +\n1)`-th largest singular value of :math:`A`. Note that :math:`\\sigma_{k\n+ 1}` is the best possible error for a rank-:math:`k` approximation\nand, in fact, is achieved by the singular value decomposition (SVD)\n:math:`A \\approx U S V^{*}`, where :math:`U \\in \\mathbb{C}^{m \\times\nk}` and :math:`V \\in \\mathbb{C}^{n \\times k}` have orthonormal columns\nand :math:`S = \\mathop{\\mathrm{diag}} (\\sigma_{i}) \\in \\mathbb{C}^{k\n\\times k}` is diagonal with nonnegative entries. The principal\nadvantages of using an ID over an SVD are that:\n\n- it is cheaper to construct;\n- it preserves the structure of :math:`A`; and\n- it is more efficient to compute with in light of the identity submatrix of :math:`P`.\n\nRoutines\n========\n\nMain functionality:\n\n.. autosummary::\n :toctree: generated/\n\n interp_decomp\n reconstruct_matrix_from_id\n reconstruct_interp_matrix\n reconstruct_skel_matrix\n id_to_svd\n svd\n estimate_spectral_norm\n estimate_spectral_norm_diff\n estimate_rank\n\nSupport functions:\n\n.. autosummary::\n :toctree: generated/\n\n seed\n rand\n\n\nReferences\n==========\n\nThis module uses the ID software package [1]_ by Martinsson, Rokhlin,\nShkolnisky, and Tygert, which is a Fortran library for computing IDs\nusing various algorithms, including the rank-revealing QR approach of\n[2]_ and the more recent randomized methods described in [3]_, [4]_,\nand [5]_. This module exposes its functionality in a way convenient\nfor Python users. 
Note that this module does not add any functionality\nbeyond that of organizing a simpler and more consistent interface.\n\nWe advise the user to consult also the `documentation for the ID package\n<http://tygert.com/id_doc.4.pdf>`_.\n\n.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. \"ID: a\n software package for low-rank approximation of matrices via interpolative\n decompositions, version 0.2.\" http://tygert.com/id_doc.4.pdf.\n\n.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. \"On the\n compression of low rank matrices.\" *SIAM J. Sci. Comput.* 26 (4): 1389--1404,\n 2005. :doi:`10.1137/030602678`.\n\n.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.\n Tygert. \"Randomized algorithms for the low-rank approximation of matrices.\"\n *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.\n :doi:`10.1073/pnas.0709640104`.\n\n.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. \"A randomized\n algorithm for the decomposition of matrices.\" *Appl. Comput. Harmon. Anal.* 30\n (1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.\n\n.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. \"A fast\n randomized algorithm for the approximation of matrices.\" *Appl. Comput.\n Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.\n\n\nTutorial\n========\n\nInitializing\n------------\n\nThe first step is to import :mod:`scipy.linalg.interpolative` by issuing the\ncommand:\n\n>>> import scipy.linalg.interpolative as sli\n\nNow let's build a matrix. For this, we consider a Hilbert matrix, which is well\nknow to have low rank:\n\n>>> from scipy.linalg import hilbert\n>>> n = 1000\n>>> A = hilbert(n)\n\nWe can also do this explicitly via:\n\n>>> import numpy as np\n>>> n = 1000\n>>> A = np.empty((n, n), order='F')\n>>> for j in range(n):\n>>> for i in range(m):\n>>> A[i,j] = 1. / (i + j + 1)\n\nNote the use of the flag ``order='F'`` in :func:`numpy.empty`. This\ninstantiates the matrix in Fortran-contiguous order and is important for\navoiding data copying when passing to the backend.\n\nWe then define multiplication routines for the matrix by regarding it as a\n:class:`scipy.sparse.linalg.LinearOperator`:\n\n>>> from scipy.sparse.linalg import aslinearoperator\n>>> L = aslinearoperator(A)\n\nThis automatically sets up methods describing the action of the matrix and its\nadjoint on a vector.\n\nComputing an ID\n---------------\n\nWe have several choices of algorithm to compute an ID. These fall largely\naccording to two dichotomies:\n\n1. how the matrix is represented, i.e., via its entries or via its action on a\n vector; and\n2. whether to approximate it to a fixed relative precision or to a fixed rank.\n\nWe step through each choice in turn below.\n\nIn all cases, the ID is represented by three parameters:\n\n1. a rank ``k``;\n2. an index array ``idx``; and\n3. 
interpolation coefficients ``proj``.\n\nThe ID is specified by the relation\n``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.\n\nFrom matrix entries\n...................\n\nWe first consider a matrix given in terms of its entries.\n\nTo compute an ID to a fixed precision, type:\n\n>>> k, idx, proj = sli.interp_decomp(A, eps)\n\nwhere ``eps < 1`` is the desired precision.\n\nTo compute an ID to a fixed rank, use:\n\n>>> idx, proj = sli.interp_decomp(A, k)\n\nwhere ``k >= 1`` is the desired rank.\n\nBoth algorithms use random sampling and are usually faster than the\ncorresponding older, deterministic algorithms, which can be accessed via the\ncommands:\n\n>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)\n\nand:\n\n>>> idx, proj = sli.interp_decomp(A, k, rand=False)\n\nrespectively.\n\nFrom matrix action\n..................\n\nNow consider a matrix given in terms of its action on a vector as a\n:class:`scipy.sparse.linalg.LinearOperator`.\n\nTo compute an ID to a fixed precision, type:\n\n>>> k, idx, proj = sli.interp_decomp(L, eps)\n\nTo compute an ID to a fixed rank, use:\n\n>>> idx, proj = sli.interp_decomp(L, k)\n\nThese algorithms are randomized.\n\nReconstructing an ID\n--------------------\n\nThe ID routines above do not output the skeleton and interpolation matrices\nexplicitly but instead return the relevant information in a more compact (and\nsometimes more useful) form. To build these matrices, write:\n\n>>> B = sli.reconstruct_skel_matrix(A, k, idx)\n\nfor the skeleton matrix and:\n\n>>> P = sli.reconstruct_interp_matrix(idx, proj)\n\nfor the interpolation matrix. The ID approximation can then be computed as:\n\n>>> C = np.dot(B, P)\n\nThis can also be constructed directly using:\n\n>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)\n\nwithout having to first compute ``P``.\n\nAlternatively, this can be done explicitly as well using:\n\n>>> B = A[:,idx[:k]]\n>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]\n>>> C = np.dot(B, P)\n\nComputing an SVD\n----------------\n\nAn ID can be converted to an SVD via the command:\n\n>>> U, S, V = sli.id_to_svd(B, idx, proj)\n\nThe SVD approximation is then:\n\n>>> C = np.dot(U, np.dot(np.diag(S), np.dot(V.conj().T)))\n\nThe SVD can also be computed \"fresh\" by combining both the ID and conversion\nsteps into one command. Following the various ID algorithms above, there are\ncorrespondingly various SVD algorithms that one can employ.\n\nFrom matrix entries\n...................\n\nWe consider first SVD algorithms for a matrix given in terms of its entries.\n\nTo compute an SVD to a fixed precision, type:\n\n>>> U, S, V = sli.svd(A, eps)\n\nTo compute an SVD to a fixed rank, use:\n\n>>> U, S, V = sli.svd(A, k)\n\nBoth algorithms use random sampling; for the determinstic versions, issue the\nkeyword ``rand=False`` as above.\n\nFrom matrix action\n..................\n\nNow consider a matrix given in terms of its action on a vector.\n\nTo compute an SVD to a fixed precision, type:\n\n>>> U, S, V = sli.svd(L, eps)\n\nTo compute an SVD to a fixed rank, use:\n\n>>> U, S, V = sli.svd(L, k)\n\nUtility routines\n----------------\n\nSeveral utility routines are also available.\n\nTo estimate the spectral norm of a matrix, use:\n\n>>> snorm = sli.estimate_spectral_norm(A)\n\nThis algorithm is based on the randomized power method and thus requires only\nmatrix-vector products. The number of iterations to take can be set using the\nkeyword ``its`` (default: ``its=20``). 
The matrix is interpreted as a\n:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it\nas a :class:`numpy.ndarray`, in which case it is trivially converted using\n:func:`scipy.sparse.linalg.aslinearoperator`.\n\nThe same algorithm can also estimate the spectral norm of the difference of two\nmatrices ``A1`` and ``A2`` as follows:\n\n>>> diff = sli.estimate_spectral_norm_diff(A1, A2)\n\nThis is often useful for checking the accuracy of a matrix approximation.\n\nSome routines in :mod:`scipy.linalg.interpolative` require estimating the rank\nof a matrix as well. This can be done with either:\n\n>>> k = sli.estimate_rank(A, eps)\n\nor:\n\n>>> k = sli.estimate_rank(L, eps)\n\ndepending on the representation. The parameter ``eps`` controls the definition\nof the numerical rank.\n\nFinally, the random number generation required for all randomized routines can\nbe controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed\nvalues to their original values, use:\n\n>>> sli.seed('default')\n\nTo specify the seed values, use:\n\n>>> sli.seed(s)\n\nwhere ``s`` must be an integer or array of 55 floats. If an integer, the array\nof floats is obtained by using ``numpy.random.rand`` with the given integer\nseed.\n\nTo simply generate some random numbers, type:\n\n>>> sli.rand(n)\n\nwhere ``n`` is the number of random numbers to generate.\n\nRemarks\n-------\n\nThe above functions all automatically detect the appropriate interface and work\nwith both real and complex data types, passing input arguments to the proper\nbackend routine.\n\n\"\"\"\n\nimport scipy.linalg._interpolative_backend as _backend\nimport numpy as np\nimport sys\n\n__all__ = [\n 'estimate_rank',\n 'estimate_spectral_norm',\n 'estimate_spectral_norm_diff',\n 'id_to_svd',\n 'interp_decomp',\n 'rand',\n 'reconstruct_interp_matrix',\n 'reconstruct_matrix_from_id',\n 'reconstruct_skel_matrix',\n 'seed',\n 'svd',\n]\n\n_DTYPE_ERROR = ValueError(\"invalid input dtype (input must be float64 or complex128)\")\n_TYPE_ERROR = TypeError(\"invalid input type (must be array or LinearOperator)\")\n_32BIT_ERROR = ValueError(\"interpolative decomposition on 32-bit systems \"\n \"with complex128 is buggy\")\n_IS_32BIT = (sys.maxsize < 2**32)\n\n\ndef _is_real(A):\n try:\n if A.dtype == np.complex128:\n return False\n elif A.dtype == np.float64:\n return True\n else:\n raise _DTYPE_ERROR\n except AttributeError as e:\n raise _TYPE_ERROR from e\n\n\ndef seed(seed=None):\n \"\"\"\n Seed the internal random number generator used in this ID package.\n\n The generator is a lagged Fibonacci method with 55-element internal state.\n\n Parameters\n ----------\n seed : int, sequence, 'default', optional\n If 'default', the random seed is reset to a default value.\n\n If `seed` is a sequence containing 55 floating-point numbers\n in range [0,1], these are used to set the internal state of\n the generator.\n\n If the value is an integer, the internal state is obtained\n from `numpy.random.RandomState` (MT19937) with the integer\n used as the initial seed.\n\n If `seed` is omitted (None), ``numpy.random.rand`` is used to\n initialize the generator.\n\n \"\"\"\n # For details, see :func:`_backend.id_srand`, :func:`_backend.id_srandi`,\n # and :func:`_backend.id_srando`.\n\n if isinstance(seed, str) and seed == 'default':\n _backend.id_srando()\n elif hasattr(seed, '__len__'):\n state = np.asfortranarray(seed, dtype=float)\n if state.shape != (55,):\n raise ValueError(\"invalid input size\")\n elif state.min() < 0 or state.max() > 
1:\n raise ValueError(\"values not in range [0,1]\")\n _backend.id_srandi(state)\n elif seed is None:\n _backend.id_srandi(np.random.rand(55))\n else:\n rnd = np.random.RandomState(seed)\n _backend.id_srandi(rnd.rand(55))\n\n\ndef rand(*shape):\n \"\"\"\n Generate standard uniform pseudorandom numbers via a very efficient lagged\n Fibonacci method.\n\n This routine is used for all random number generation in this package and\n can affect ID and SVD results.\n\n Parameters\n ----------\n *shape\n Shape of output array\n\n \"\"\"\n # For details, see :func:`_backend.id_srand`, and :func:`_backend.id_srando`.\n return _backend.id_srand(np.prod(shape)).reshape(shape)\n\n\ndef interp_decomp(A, eps_or_k, rand=True):\n \"\"\"\n Compute ID of a matrix.\n\n An ID of a matrix `A` is a factorization defined by a rank `k`, a column\n index array `idx`, and interpolation coefficients `proj` such that::\n\n numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]\n\n The original matrix can then be reconstructed as::\n\n numpy.hstack([A[:,idx[:k]],\n numpy.dot(A[:,idx[:k]], proj)]\n )[:,numpy.argsort(idx)]\n\n or via the routine :func:`reconstruct_matrix_from_id`. This can\n equivalently be written as::\n\n numpy.dot(A[:,idx[:k]],\n numpy.hstack([numpy.eye(k), proj])\n )[:,np.argsort(idx)]\n\n in terms of the skeleton and interpolation matrices::\n\n B = A[:,idx[:k]]\n\n and::\n\n P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]\n\n respectively. See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n The ID can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then\n this function has the output signature::\n\n k, idx, proj = interp_decomp(A, eps_or_k)\n\n Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output\n signature is::\n\n idx, proj = interp_decomp(A, eps_or_k)\n\n .. This function automatically detects the form of the input parameters\n and passes them to the appropriate backend. 
For details, see\n :func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,\n :func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,\n :func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,\n :func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,\n :func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,\n :func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`\n Matrix to be factored\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n k : int\n Rank required to achieve specified relative precision if\n `eps_or_k < 1`.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if eps_or_k < 1:\n eps = eps_or_k\n if rand:\n if real:\n k, idx, proj = _backend.iddp_aid(eps, A)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n k, idx, proj = _backend.idzp_aid(eps, A)\n else:\n if real:\n k, idx, proj = _backend.iddp_id(eps, A)\n else:\n k, idx, proj = _backend.idzp_id(eps, A)\n return k, idx - 1, proj\n else:\n k = int(eps_or_k)\n if rand:\n if real:\n idx, proj = _backend.iddr_aid(A, k)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n idx, proj = _backend.idzr_aid(A, k)\n else:\n if real:\n idx, proj = _backend.iddr_id(A, k)\n else:\n idx, proj = _backend.idzr_id(A, k)\n return idx - 1, proj\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matveca = A.rmatvec\n if eps_or_k < 1:\n eps = eps_or_k\n if real:\n k, idx, proj = _backend.iddp_rid(eps, m, n, matveca)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n k, idx, proj = _backend.idzp_rid(eps, m, n, matveca)\n return k, idx - 1, proj\n else:\n k = int(eps_or_k)\n if real:\n idx, proj = _backend.iddr_rid(m, n, matveca, k)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n idx, proj = _backend.idzr_rid(m, n, matveca, k)\n return idx - 1, proj\n else:\n raise _TYPE_ERROR\n\n\ndef reconstruct_matrix_from_id(B, idx, proj):\n \"\"\"\n Reconstruct matrix from its ID.\n\n A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`\n and `proj`, respectively, can be reconstructed as::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. 
For details, see :func:`_backend.idd_reconid` and\n :func:`_backend.idz_reconid`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Reconstructed matrix.\n \"\"\"\n if _is_real(B):\n return _backend.idd_reconid(B, idx + 1, proj)\n else:\n return _backend.idz_reconid(B, idx + 1, proj)\n\n\ndef reconstruct_interp_matrix(idx, proj):\n \"\"\"\n Reconstruct interpolation matrix from ID.\n\n The interpolation matrix can be reconstructed from the ID indices and\n coefficients `idx` and `proj`, respectively, as::\n\n P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]\n\n The original matrix can then be reconstructed from its skeleton matrix `B`\n via::\n\n numpy.dot(B, P)\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_reconint` and\n :func:`_backend.idz_reconint`.\n\n Parameters\n ----------\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Interpolation matrix.\n \"\"\"\n if _is_real(proj):\n return _backend.idd_reconint(idx + 1, proj)\n else:\n return _backend.idz_reconint(idx + 1, proj)\n\n\ndef reconstruct_skel_matrix(A, k, idx):\n \"\"\"\n Reconstruct skeleton matrix from ID.\n\n The skeleton matrix can be reconstructed from the original matrix `A` and its\n ID rank and indices `k` and `idx`, respectively, as::\n\n B = A[:,idx[:k]]\n\n The original matrix can then be reconstructed via::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_interp_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_copycols` and\n :func:`_backend.idz_copycols`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray`\n Original matrix.\n k : int\n Rank of ID.\n idx : :class:`numpy.ndarray`\n Column index array.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Skeleton matrix.\n \"\"\"\n if _is_real(A):\n return _backend.idd_copycols(A, k, idx + 1)\n else:\n return _backend.idz_copycols(A, k, idx + 1)\n\n\ndef id_to_svd(B, idx, proj):\n \"\"\"\n Convert ID to SVD.\n\n The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and\n coefficients `idx` and `proj`, respectively, is::\n\n U, S, V = id_to_svd(B, idx, proj)\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n See also :func:`svd`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. 
For details, see :func:`_backend.idd_id2svd` and\n :func:`_backend.idz_id2svd`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n \"\"\"\n if _is_real(B):\n U, V, S = _backend.idd_id2svd(B, idx + 1, proj)\n else:\n U, V, S = _backend.idz_id2svd(B, idx + 1, proj)\n return U, S, V\n\n\ndef estimate_spectral_norm(A, its=20):\n \"\"\"\n Estimate spectral norm of a matrix by the randomized power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_snorm` and\n :func:`_backend.idz_snorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate.\n \"\"\"\n from scipy.sparse.linalg import aslinearoperator\n A = aslinearoperator(A)\n m, n = A.shape\n matvec = lambda x: A. matvec(x)\n matveca = lambda x: A.rmatvec(x)\n if _is_real(A):\n return _backend.idd_snorm(m, n, matveca, matvec, its=its)\n else:\n return _backend.idz_snorm(m, n, matveca, matvec, its=its)\n\n\ndef estimate_spectral_norm_diff(A, B, its=20):\n \"\"\"\n Estimate spectral norm of the difference of two matrices by the randomized\n power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and\n :func:`_backend.idz_diffsnorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n B : :class:`scipy.sparse.linalg.LinearOperator`\n Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with\n the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate of matrix difference.\n \"\"\"\n from scipy.sparse.linalg import aslinearoperator\n A = aslinearoperator(A)\n B = aslinearoperator(B)\n m, n = A.shape\n matvec1 = lambda x: A. matvec(x)\n matveca1 = lambda x: A.rmatvec(x)\n matvec2 = lambda x: B. matvec(x)\n matveca2 = lambda x: B.rmatvec(x)\n if _is_real(A):\n return _backend.idd_diffsnorm(\n m, n, matveca1, matveca2, matvec1, matvec2, its=its)\n else:\n return _backend.idz_diffsnorm(\n m, n, matveca1, matveca2, matvec1, matvec2, its=its)\n\n\ndef svd(A, eps_or_k, rand=True):\n \"\"\"\n Compute SVD of a matrix via an ID.\n\n An SVD of a matrix `A` is a factorization::\n\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n where `U` and `V` have orthonormal columns and `S` is nonnegative.\n\n The SVD can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`).\n\n See also :func:`interp_decomp` and :func:`id_to_svd`.\n\n .. This function automatically detects the form of the input parameters and\n passes them to the appropriate backend. 
For details, see\n :func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,\n :func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,\n :func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,\n :func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,\n :func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,\n :func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`\n Matrix to be factored, given as either a :class:`numpy.ndarray` or a\n :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and\n `rmatvec` methods (to apply the matrix and its adjoint).\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if eps_or_k < 1:\n eps = eps_or_k\n if rand:\n if real:\n U, V, S = _backend.iddp_asvd(eps, A)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n U, V, S = _backend.idzp_asvd(eps, A)\n else:\n if real:\n U, V, S = _backend.iddp_svd(eps, A)\n else:\n U, V, S = _backend.idzp_svd(eps, A)\n else:\n k = int(eps_or_k)\n if k > min(A.shape):\n raise ValueError(\"Approximation rank %s exceeds min(A.shape) = \"\n \" %s \" % (k, min(A.shape)))\n if rand:\n if real:\n U, V, S = _backend.iddr_asvd(A, k)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n U, V, S = _backend.idzr_asvd(A, k)\n else:\n if real:\n U, V, S = _backend.iddr_svd(A, k)\n else:\n U, V, S = _backend.idzr_svd(A, k)\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matvec = lambda x: A.matvec(x)\n matveca = lambda x: A.rmatvec(x)\n if eps_or_k < 1:\n eps = eps_or_k\n if real:\n U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec)\n else:\n k = int(eps_or_k)\n if real:\n U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k)\n else:\n if _IS_32BIT:\n raise _32BIT_ERROR\n U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k)\n else:\n raise _TYPE_ERROR\n return U, S, V\n\n\ndef estimate_rank(A, eps):\n \"\"\"\n Estimate matrix rank to a specified relative precision using randomized\n methods.\n\n The matrix `A` can be given as either a :class:`numpy.ndarray` or a\n :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used\n for each case. If `A` is of type :class:`numpy.ndarray`, then the output\n rank is typically about 8 higher than the actual numerical rank.\n\n .. This function automatically detects the form of the input parameters and\n passes them to the appropriate backend. 
For details,\n see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`,\n :func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`\n Matrix whose rank is to be estimated, given as either a\n :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`\n with the `rmatvec` method (to apply the matrix adjoint).\n eps : float\n Relative error for numerical rank definition.\n\n Returns\n -------\n int\n Estimated matrix rank.\n \"\"\"\n from scipy.sparse.linalg import LinearOperator\n\n real = _is_real(A)\n\n if isinstance(A, np.ndarray):\n if real:\n rank = _backend.idd_estrank(eps, A)\n else:\n rank = _backend.idz_estrank(eps, A)\n if rank == 0:\n # special return value for nearly full rank\n rank = min(A.shape)\n return rank\n elif isinstance(A, LinearOperator):\n m, n = A.shape\n matveca = A.rmatvec\n if real:\n return _backend.idd_findrank(eps, m, n, matveca)\n else:\n return _backend.idz_findrank(eps, m, n, matveca)\n else:\n raise _TYPE_ERROR\n", "import numpy as np\nfrom scipy.sparse import coo_matrix\n\n\ndef crosstab(*args, levels=None, sparse=False):\n \"\"\"\n Return table of counts for each possible unique combination in ``*args``.\n\n When ``len(args) > 1``, the array computed by this function is\n often referred to as a *contingency table* [1]_.\n\n The arguments must be sequences with the same length. The second return\n value, `count`, is an integer array with ``len(args)`` dimensions. If\n `levels` is None, the shape of `count` is ``(n0, n1, ...)``, where ``nk``\n is the number of unique elements in ``args[k]``.\n\n Parameters\n ----------\n *args : sequences\n A sequence of sequences whose unique aligned elements are to be\n counted. The sequences in args must all be the same length.\n levels : sequence, optional\n If `levels` is given, it must be a sequence that is the same length as\n `args`. Each element in `levels` is either a sequence or None. If it\n is a sequence, it gives the values in the corresponding sequence in\n `args` that are to be counted. If any value in the sequences in `args`\n does not occur in the corresponding sequence in `levels`, that value\n is ignored and not counted in the returned array `count`. The default\n value of `levels` for ``args[i]`` is ``np.unique(args[i])``\n sparse : bool, optional\n If True, return a sparse matrix. The matrix will be an instance of\n the `scipy.sparse.coo_matrix` class. Because SciPy's sparse matrices\n must be 2-d, only two input sequences are allowed when `sparse` is\n True. Default is False.\n\n Returns\n -------\n elements : tuple of numpy.ndarrays.\n Tuple of length ``len(args)`` containing the arrays of elements that\n are counted in `count`. These can be interpreted as the labels of\n the corresponding dimensions of `count`.\n If `levels` was given, then if ``levels[i]`` is not None,\n ``elements[i]`` will hold the values given in ``levels[i]``.\n count : numpy.ndarray or scipy.sparse.coo_matrix\n Counts of the unique elements in ``zip(*args)``, stored in an array.\n Also known as a *contingency table* when ``len(args) > 1``.\n\n See Also\n --------\n numpy.unique\n\n Notes\n -----\n .. versionadded:: 1.7.0\n\n References\n ----------\n .. 
[1] \"Contingency table\", http://en.wikipedia.org/wiki/Contingency_table\n\n Examples\n --------\n >>> from scipy.stats.contingency import crosstab\n\n Given the lists `a` and `x`, create a contingency table that counts the\n frequencies of the corresponding pairs.\n\n >>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']\n >>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']\n >>> (avals, xvals), count = crosstab(a, x)\n >>> avals\n array(['A', 'B'], dtype='<U1')\n >>> xvals\n array(['X', 'Y', 'Z'], dtype='<U1')\n >>> count\n array([[2, 3, 0],\n [1, 0, 4]])\n\n So `('A', 'X')` occurs twice, `('A', 'Y')` occurs three times, etc.\n\n Higher dimensional contingency tables can be created.\n\n >>> p = [0, 0, 0, 0, 1, 1, 1, 0, 0, 1]\n >>> (avals, xvals, pvals), count = crosstab(a, x, p)\n >>> count\n array([[[2, 0],\n [2, 1],\n [0, 0]],\n [[1, 0],\n [0, 0],\n [1, 3]]])\n >>> count.shape\n (2, 3, 2)\n\n The values to be counted can be set by using the `levels` argument.\n It allows the elements of interest in each input sequence to be\n given explicitly instead finding the unique elements of the sequence.\n\n For example, suppose one of the arguments is an array containing the\n answers to a survey question, with integer values 1 to 4. Even if the\n value 1 does not occur in the data, we want an entry for it in the table.\n\n >>> q1 = [2, 3, 3, 2, 4, 4, 2, 3, 4, 4, 4, 3, 3, 3, 4] # 1 does not occur.\n >>> q2 = [4, 4, 2, 2, 2, 4, 1, 1, 2, 2, 4, 2, 2, 2, 4] # 3 does not occur.\n >>> options = [1, 2, 3, 4]\n >>> vals, count = crosstab(q1, q2, levels=(options, options))\n >>> count\n array([[0, 0, 0, 0],\n [1, 1, 0, 1],\n [1, 4, 0, 1],\n [0, 3, 0, 3]])\n\n If `levels` is given, but an element of `levels` is None, the unique values\n of the corresponding argument are used. 
For example,\n\n >>> vals, count = crosstab(q1, q2, levels=(None, options))\n >>> vals\n [array([2, 3, 4]), [1, 2, 3, 4]]\n >>> count\n array([[1, 1, 0, 1],\n [1, 4, 0, 1],\n [0, 3, 0, 3]])\n\n If we want to ignore the pairs where 4 occurs in ``q2``, we can\n give just the values [1, 2] to `levels`, and the 4 will be ignored:\n\n >>> vals, count = crosstab(q1, q2, levels=(None, [1, 2]))\n >>> vals\n [array([2, 3, 4]), [1, 2]]\n >>> count\n array([[1, 1],\n [1, 4],\n [0, 3]])\n\n Finally, let's repeat the first example, but return a sparse matrix:\n\n >>> (avals, xvals), count = crosstab(a, x, sparse=True)\n >>> count\n <2x3 sparse matrix of type '<class 'numpy.int64'>'\n with 4 stored elements in COOrdinate format>\n >>> count.A\n array([[2, 3, 0],\n [1, 0, 4]])\n\n \"\"\"\n nargs = len(args)\n if nargs == 0:\n raise TypeError(\"At least one input sequence is required.\")\n\n len0 = len(args[0])\n if not all(len(a) == len0 for a in args[1:]):\n raise ValueError(\"All input sequences must have the same length.\")\n\n if sparse and nargs != 2:\n raise ValueError(\"When `sparse` is True, only two input sequences \"\n \"are allowed.\")\n\n if levels is None:\n # Call np.unique with return_inverse=True on each argument.\n actual_levels, indices = zip(*[np.unique(a, return_inverse=True)\n for a in args])\n else:\n # `levels` is not None...\n if len(levels) != nargs:\n raise ValueError('len(levels) must equal the number of input '\n 'sequences')\n\n args = [np.asarray(arg) for arg in args]\n mask = np.zeros((nargs, len0), dtype=np.bool_)\n inv = np.zeros((nargs, len0), dtype=np.intp)\n actual_levels = []\n for k, (levels_list, arg) in enumerate(zip(levels, args)):\n if levels_list is None:\n levels_list, inv[k, :] = np.unique(arg, return_inverse=True)\n mask[k, :] = True\n else:\n q = arg == np.asarray(levels_list).reshape(-1, 1)\n mask[k, :] = np.any(q, axis=0)\n qnz = q.T.nonzero()\n inv[k, qnz[0]] = qnz[1]\n actual_levels.append(levels_list)\n\n mask_all = mask.all(axis=0)\n indices = tuple(inv[:, mask_all])\n\n if sparse:\n count = coo_matrix((np.ones(len(indices[0]), dtype=int),\n (indices[0], indices[1])))\n count.sum_duplicates()\n else:\n shape = [len(u) for u in actual_levels]\n count = np.zeros(shape, dtype=int)\n np.add.at(count, indices, 1)\n\n return actual_levels, count\n" ]
[ [ "scipy.optimize.shgo", "scipy.optimize.dual_annealing", "scipy.optimize.differential_evolution", "matplotlib.pyplot.figure", "numpy.stack", "numpy.arange", "matplotlib.pyplot.show", "numpy.meshgrid" ], [ "numpy.loadtxt", "numpy.savez_compressed", "numpy.load" ], [ "numpy.random.rand", "scipy.linalg._interpolative_backend.idd_copycols", "scipy.linalg._interpolative_backend.idz_copycols", "scipy.linalg._interpolative_backend.idd_diffsnorm", "numpy.asfortranarray", "scipy.linalg._interpolative_backend.idd_id2svd", "scipy.linalg._interpolative_backend.idzp_id", "scipy.linalg._interpolative_backend.idd_findrank", "scipy.linalg._interpolative_backend.idd_snorm", "scipy.linalg._interpolative_backend.id_srandi", "scipy.linalg._interpolative_backend.idd_reconint", "scipy.linalg._interpolative_backend.idzr_aid", "scipy.linalg._interpolative_backend.iddp_svd", "scipy.linalg._interpolative_backend.iddp_rsvd", "scipy.linalg._interpolative_backend.iddr_rsvd", "scipy.linalg._interpolative_backend.idzp_rsvd", "scipy.linalg._interpolative_backend.idz_reconint", "numpy.prod", "scipy.linalg._interpolative_backend.idzr_svd", "scipy.linalg._interpolative_backend.idz_reconid", "scipy.linalg._interpolative_backend.idzr_asvd", "scipy.linalg._interpolative_backend.idd_reconid", "scipy.linalg._interpolative_backend.idz_id2svd", "scipy.linalg._interpolative_backend.iddp_aid", "scipy.linalg._interpolative_backend.idzp_aid", "scipy.sparse.linalg.aslinearoperator", "scipy.linalg._interpolative_backend.id_srando", "scipy.linalg._interpolative_backend.idd_estrank", "scipy.linalg._interpolative_backend.idzr_rid", "scipy.linalg._interpolative_backend.iddr_asvd", "scipy.linalg._interpolative_backend.iddp_asvd", "scipy.linalg._interpolative_backend.idzp_rid", "scipy.linalg._interpolative_backend.iddr_svd", "scipy.linalg._interpolative_backend.idzr_id", "scipy.linalg._interpolative_backend.idz_findrank", "numpy.random.RandomState", "scipy.linalg._interpolative_backend.iddp_id", "scipy.linalg._interpolative_backend.idzp_svd", "scipy.linalg._interpolative_backend.idzp_asvd", "scipy.linalg._interpolative_backend.iddr_rid", "scipy.linalg._interpolative_backend.iddr_id", "scipy.linalg._interpolative_backend.iddp_rid", "scipy.linalg._interpolative_backend.iddr_aid", "scipy.linalg._interpolative_backend.idz_diffsnorm", "scipy.linalg._interpolative_backend.idz_estrank", "scipy.linalg._interpolative_backend.idzr_rsvd", "scipy.linalg._interpolative_backend.idz_snorm" ], [ "numpy.asarray", "numpy.zeros", "numpy.any", "numpy.add.at", "numpy.unique" ] ]
rexdivakar/Telegram-Notify
[ "7d4f317548e6c1fa14db1c636c328aac02224dc9" ]
[ "temp.py" ]
[ "import ssl\nfrom notifly import tf_notifier\nimport tensorflow as tf\nfrom dotenv import load_dotenv\nimport os\n\n\nload_dotenv()\n\nssl._create_default_https_context = ssl._create_unverified_context\ntoken = os.getenv('TOKEN')\nnotifier = tf_notifier.TfNotifier(token=token, platform='discord')\n\n\nclass TestCallback(tf.keras.callbacks.Callback):\n\n @notifier.notify_on_epoch_begin(epoch_interval=1, graph_interval=10)\n def on_epoch_begin(self, epoch, logs=None):\n pass\n\n @notifier.notify_on_epoch_end(epoch_interval=1, graph_interval=10)\n def on_epoch_end(self, epoch, logs=None):\n pass\n\n @notifier.notify_on_train_begin()\n def on_train_begin(self, logs=None):\n pass\n\n @notifier.notify_on_train_end()\n def on_train_end(self, logs=None):\n pass\n\n\nfashion_mnist = tf.keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(5, activation='relu'),\n tf.keras.layers.Dense(10)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.fit(train_images, train_labels, epochs=5, callbacks=[TestCallback()])\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint('\\nTest accuracy:', test_acc)\n" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense" ] ]
realfolkcode/PyTorch-VAE
[ "6abff8c2483e04bbec936bcd1cf20f8f2705266d" ]
[ "models/vanilla_vae.py" ]
[ "import torch\nfrom models import BaseVAE\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom .types_ import *\n\n\nclass VanillaVAE(BaseVAE):\n\n\n def __init__(self,\n in_channels: int,\n latent_dim: int,\n hidden_dims: List = None,\n **kwargs) -> None:\n super(VanillaVAE, self).__init__()\n\n self.latent_dim = latent_dim\n self.in_channels = in_channels\n\n modules = []\n if hidden_dims is None:\n hidden_dims = [32, 64, 128, 256, 512]\n\n # Build Encoder\n for h_dim in hidden_dims:\n modules.append(\n nn.Sequential(\n nn.Linear(in_channels, h_dim),\n nn.Tanh())\n )\n in_channels = h_dim\n\n self.encoder = nn.Sequential(*modules)\n self.fc_mu = nn.Linear(hidden_dims[-1], latent_dim) \n self.fc_var = nn.Linear(hidden_dims[-1], latent_dim)\n\n\n # Build Decoder\n modules = []\n\n self.decoder = nn.Sequential(\n nn.Linear(latent_dim, hidden_dims[-1]),\n nn.Tanh()\n )\n\n self.final_layer = nn.Sequential(nn.Linear(hidden_dims[-1], self.in_channels),\n nn.Sigmoid())\n\n def encode(self, input: Tensor) -> List[Tensor]:\n \"\"\"\n Encodes the input by passing through the encoder network\n and returns the latent codes.\n :param input: (Tensor) Input tensor to encoder [N x C x H x W]\n :return: (Tensor) List of latent codes\n \"\"\"\n input = input.view(input.shape[0], -1)\n result = self.encoder(input)\n result = torch.flatten(result, start_dim=1)\n\n # Split the result into mu and var components\n # of the latent Gaussian distribution\n mu = self.fc_mu(result)\n log_var = self.fc_var(result)\n\n return [mu, log_var]\n\n def decode(self, z: Tensor) -> Tensor:\n \"\"\"\n Maps the given latent codes\n onto the image space.\n :param z: (Tensor) [B x D]\n :return: (Tensor) [B x C x H x W]\n \"\"\"\n \n result = self.decoder(z)\n result = self.final_layer(result)\n result = result.view(result.shape[0], 28, 28)\n return result\n\n def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:\n \"\"\"\n Reparameterization trick to sample from N(mu, var) from\n N(0,1).\n :param mu: (Tensor) Mean of the latent Gaussian [B x D]\n :param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]\n :return: (Tensor) [B x D]\n \"\"\"\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n\n def forward(self, input: Tensor, **kwargs) -> List[Tensor]:\n mu, log_var = self.encode(input)\n z = self.reparameterize(mu, log_var)\n return [self.decode(z), input, mu, log_var]\n\n def loss_function(self,\n *args,\n **kwargs) -> dict:\n \"\"\"\n Computes the VAE loss function.\n KL(N(\\mu, \\sigma), N(0, 1)) = \\log \\frac{1}{\\sigma} + \\frac{\\sigma^2 + \\mu^2}{2} - \\frac{1}{2}\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n recons = args[0]\n input = args[1]\n mu = args[2]\n log_var = args[3]\n kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss =F.mse_loss(recons, input.view(input.shape[0], 28, 28))\n\n\n kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)\n\n loss = recons_loss + kld_weight * kld_loss\n return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}\n\n def sample(self,\n num_samples:int,\n current_device: int, **kwargs) -> Tensor:\n \"\"\"\n Samples from the latent space and return the corresponding\n image space map.\n :param num_samples: (Int) Number of samples\n :param current_device: (Int) Device to run the model\n :return: (Tensor)\n \"\"\"\n z = torch.randn(num_samples,\n self.latent_dim)\n\n z = z.to(current_device)\n\n samples = 
self.decode(z)\n return samples\n\n def generate(self, x: Tensor, **kwargs) -> Tensor:\n \"\"\"\n Given an input image x, returns the reconstructed image\n :param x: (Tensor) [B x C x H x W]\n :return: (Tensor) [B x C x H x W]\n \"\"\"\n\n return self.forward(x)[0]\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sigmoid", "torch.nn.Sequential", "torch.nn.Tanh", "torch.randn_like", "torch.flatten", "torch.exp", "torch.randn" ] ]
kantharajucn/job_seniority_prediction
[ "cad9147ffddab1c5ead878c2f9d9e48199dc0da9" ]
[ "src/dataset.py" ]
[ "import torch\nfrom sklearn.preprocessing import LabelEncoder\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass JobsDataset(Dataset):\n def __init__(self, X, y, tokenizer, max_len=512):\n self.len = len(X)\n self.data = X\n self.y = y\n self.tokenizer = tokenizer\n self.max_len = max_len\n self._label_encode()\n\n def _label_encode(self):\n self.label_encoder = LabelEncoder()\n self.y = self.label_encoder.fit_transform(self.y)\n\n def __getitem__(self, index):\n title = str(self.data.title[index])\n title = \" \".join(title.split())\n description = str(self.data.description[index])\n description = \" \".join(description.split())\n inputs = self.tokenizer.encode_plus(\n text=title,\n text_pair=description,\n add_special_tokens=True,\n max_length=self.max_len,\n padding='max_length',\n return_token_type_ids=True,\n truncation=True\n )\n ids = inputs['input_ids']\n mask = inputs['attention_mask']\n\n return {\n 'ids': torch.tensor(ids, dtype=torch.long),\n 'mask': torch.tensor(mask, dtype=torch.long),\n 'targets': torch.tensor(self.y[index], dtype=torch.long)\n }\n\n def __len__(self):\n return self.len\n\n\ndef get_data_loader(X_train, X_valid, y_train, y_valid, tokenizer, batch_size=16, num_workers=1):\n training_set = JobsDataset(X_train, y_train, tokenizer, max_len=512)\n validation_set = JobsDataset(X_valid, y_valid, tokenizer, max_len=512)\n train_params = {'batch_size': batch_size,\n 'shuffle': True,\n 'num_workers': num_workers\n }\n\n test_params = {'batch_size': batch_size,\n 'shuffle': True,\n 'num_workers': num_workers\n }\n\n training_loader = DataLoader(training_set, **train_params)\n validation_loader = DataLoader(validation_set, **test_params)\n return training_loader, validation_loader\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "torch.tensor", "torch.utils.data.DataLoader" ] ]
MATHplus-Young-Academy/P3-Morph-Scoring
[ "0e2ba66cf28e30525b22706cc50d23b9de09a58a" ]
[ "morphomatics_med/manifold/Bezierfold.py" ]
[ "################################################################################\n# #\n# This file is part of the Morphomatics library #\n# see https://github.com/morphomatics/morphomatics #\n# #\n# Copyright (C) 2021 Zuse Institute Berlin #\n# #\n# Morphomatics is distributed under the terms of the ZIB Academic License. #\n# see $MORPHOMATICS/LICENSE #\n# #\n################################################################################\n\nimport numpy as np\n\nimport scipy.integrate as integrate\n\nfrom . import Manifold\n\nfrom joblib import Parallel, delayed\n\nimport time\n\nimport copy\n\nfrom ..stats.RiemannianRegression import RiemannianRegression\nfrom ..stats import ExponentialBarycenter\nfrom ..geom.BezierSpline import BezierSpline\n\n\nclass Bezierfold(Manifold):\n \"\"\"Manifold of Bézier curves (of fixed degree)\n\n Only for single-segment curves for now.\n \"\"\"\n\n def __init__(self, M: Manifold, degree):\n \"\"\"\n\n :arg M: base manifold in which the curves lie\n :arg degree: degree of the Bézier curves\n \"\"\"\n assert M is not None\n\n self._M = M\n\n self._degree = degree\n\n name = 'Manifold of Bézier curves of degree {d} through '.format(d=degree)+M.__str__\n K = np.sum(self._degree) - 1\n dimension = K * M.dim\n point_shape = [K, M.point_shape]\n super().__init__(name, dimension, point_shape)\n\n @property\n def typicaldist(self):\n return\n\n def inner(self, bet, X, Y):\n \"\"\"Functional-based metric\n Vector fields must be given as functions.\n\n :arg bet: Bézier curve in M\n :arg X: generalized Jacobi Field along B\n :arg Y: generalized Jacobi Field along B\n :return: inner product of X and Y at B\n \"\"\"\n assert bet.degrees == self._degree\n # TODO\n return\n\n def norm(self, bet, X):\n \"\"\"Norm of tangent vectors induced by the functional-based metric\n\n :arg bet: Bézier curve in M\n :arg X: generalized Jacobi Field along B\n :return: norm of X\n \"\"\"\n assert bet.degrees() == self._degree\n\n return np.sqrt(self.inner(bet, X, X))\n\n def proj(self, X, H):\n # TODO\n return\n\n egrad2rgrad = proj\n\n def ehess2rhess(self, p, G, H, X):\n \"\"\"Converts the Euclidean gradient G and Hessian H of a function at\n a point p along a tangent vector X to the Riemannian Hessian\n along X on the manifold.\n \"\"\"\n return\n\n def retr(self, R, X):\n # TODO\n return self.exp(R, X)\n\n def exp(self, R, X):\n # TODO\n return\n\n def log(self, R, Q):\n # TODO\n return\n\n def geopoint(self, R, Q, t):\n # TODO\n return\n\n def discgeodesic(self, alp, bet, n=5, eps=1e-10, nsteps=30, verbosity=1):\n \"\"\"Discrete shortest path through space of Bézier curves of same degree\n\n :param alp: Bézier curve in manifold M\n :param bet: Bézier curve in manifold M\n :param n: create discrete n-geodesic\n :param eps: iteration stops when the difference in energy between the new and old iterate drops below eps\n :param nsteps : maximal number of steps\n :param verbosity: 0 (no text) or 1 (print information on convergence)\n :return: control points of the Bézier curves along the shortest path\n \"\"\"\n\n assert alp.degrees == self._degree and bet.degrees == self._degree\n\n def init_disc_curve(alp, bet, n):\n \"\"\"Initialize discrete curve by aligning control points along geodesics\n \"\"\"\n\n # initial discrete curve\n m = np.array(alp.control_points[0].shape)\n m[0] = self._degree + 1\n H = [alp]\n # logs between corresponding control points\n X = np.zeros(m)\n for j in range(self._degree + 1):\n X[j] = self._M.connec.log(alp.control_points[0][j], 
bet.control_points[0][j])\n # initialize control points along geodesics\n for i in range(1, n):\n P = np.zeros(m)\n for j in range(self._degree + 1):\n P[j] = self._M.connec.exp(alp.control_points[0][j], i / n * X[j])\n\n H.append(BezierSpline(self._M, [P]))\n\n H.append(bet)\n\n return H\n\n # initialize path\n H = init_disc_curve(alp, bet, n)\n\n Eold = self.disc_path_energy(H)\n Enew = self.disc_path_energy(H)\n step = 0\n # optimize path\n while (np.abs(Enew - Eold) > eps and step < nsteps) or step == 0:\n step += 1\n Eold = Enew\n H_old = copy.deepcopy(H)\n\n for i in range(1, n):\n t = np.linspace(0, 1, num=self._degree + 1)\n double_t = np.concatenate((t, t))\n\n h1 = H[i - 1].eval(t)\n h2 = H[i + 1].eval(t)\n Y = np.concatenate((h1, h2))\n\n regression = RiemannianRegression(self._M, Y, double_t, self._degree, verbosity=11*verbosity)\n\n H[i] = regression.trend\n\n Enew = self.disc_path_energy(H)\n\n # check whether energy has increased\n if Enew > Eold:\n print('Stopped computing discrete geodesic because the energy increased in step '+str(step)+'.')\n return H_old\n\n if np.isnan(Enew):\n # repeat\n H = H_old\n print('Had to repeat because of Nan-value.')\n else:\n if verbosity:\n print('Disc-Geo-Step', step, 'Energy:', Enew, 'Enew - Eold:', Enew - Eold)\n\n return H\n\n def loc_dist(self, alp: BezierSpline, bet: BezierSpline, t=np.array([0, 1 / 2, 1])):\n \"\"\" Evaluate distance between two Bézier curves in M at several points\n\n :param alp: Bézier curve\n :param bet: Bézier curve\n :param t: vector with elements in [0,1]\n\n :return: vector with point-wise distances\n \"\"\"\n a_val = alp.eval(t)\n b_val = bet.eval(t)\n d_M = []\n for i in range(len(t)):\n d_M.append(self._M.metric.dist(a_val[i], b_val[i]))\n return np.array(d_M), t\n\n def disc_path_energy(self, H):\n \"\"\"Discrete path energy\n\n :param H: discrete path given as ordered list of Bézier curves of the same degree\n :return: energy of H\n \"\"\"\n # test ¨regression-conform¨ distance\n t = np.linspace(0, 1, num=self._degree + 1)\n d = 0\n for i in range(len(H) - 1):\n dh, _ = self.loc_dist(H[i], H[i + 1], t)\n d += np.sum(dh**2, axis=0)\n\n return d\n\n def rand(self):\n # TODO\n return\n\n def randvec(self, X):\n # TODO\n return\n\n def zerovec(self):\n # TODO\n return\n\n def transp(self, R, Q, X):\n # TODO\n return\n\n def pairmean(self, alp, bet):\n # TODO\n return\n\n def dist(self, alp, bet, l=5):\n \"\"\"Approximate the distance between two Bézier splines\n\n :param alp: Bézier spline\n :param bet: Bézier spline\n :param l: work with discrete l-geodesic\n :return: length of l-geodesic between alp and bet (approximation of the distance)\n \"\"\"\n\n Gam = self.discgeodesic(alp, bet, n=l)\n\n d = 0\n for i in range(len(Gam) - 1):\n y, t = self.loc_dist(Gam[i], Gam[i + 1])\n d += integrate.simps(y, t)\n\n return d\n\n def mean(self, B, n=3, delta=1e-5, min_stepsize=1e-10, nsteps=20, eps=1e-5, n_stepsGeo=10, verbosity=1):\n \"\"\"Discrete mean of a set of Bézier curves\n\n :param B: list of Bézier curves\n :param n: use discrete n-geodesics\n :param delta: iteration stops when the difference in energy between the new and old iterate drops below eps\n :param min_stepsize: iteration stops when the step length is smaller than the given value\n :param nsteps: maximal number of iterations\n :param eps: see eps in discgeodesic\n :param n_stepsGeo: maximal number of iterations when computating discrete geodesics\n :param verbosity: 0 (no text) or 1 (print information on convergence)\n :return: mean curve\n 
\"\"\"\n begin_mean = time.time()\n\n # get shape of array of control points\n m = np.array(B[0].control_points[0].shape)\n m[0] = self._degree + 1\n\n def legs(meaniterate):\n \"\"\" Construct legs of polygonal spider, i.e., discrete n-geodesics between mean iterate and input curves\n \"\"\"\n return Parallel(n_jobs=-1, prefer='threads', require='sharedmem')(delayed(self.discgeodesic)\n (meaniterate, b, n=n, eps=eps,\n nsteps=n_stepsGeo, verbosity=0)\n for b in B)\n\n def loss(FF):\n G = 0\n for HH in FF:\n G += self.disc_path_energy(HH)\n return G\n\n # initialize i-th control point of the mean as the mean of the i-th control points of the data\n C = ExponentialBarycenter\n P = np.zeros(m)\n for i in range(self._degree + 1):\n D = []\n for bet in B:\n D.append(bet.control_points[0][i])\n\n P[i] = C.compute(self._M, D)\n\n # initial guess\n bet_mean = B[0]\n\n # initial legs\n F = legs(bet_mean)\n # initialize stopping parameters\n Eold = 10\n Enew = 1\n stepsize = 10\n step = 0\n while np.abs(Enew - Eold) > delta and stepsize > min_stepsize and step <= nsteps:\n step += 1\n Eold = Enew\n F_old = F\n old_mean = BezierSpline(self._M, bet_mean.control_points)\n\n # new data for regression\n t = np.linspace(0, 1, num=self._degree + 1)\n Y = []\n\n for H in F:\n Y.append(H[1].eval(t))\n\n # make regression w.r.t. mean values -> faster\n mean_Y = np.zeros_like(Y[0])\n for i in range(len(mean_Y)):\n dat = []\n for j in range(len(Y)):\n # take value of each curve at time t_i\n dat.append(Y[j][i])\n\n mean_Y[i] = C.compute(self._M, dat)\n\n if verbosity == 2:\n print('Step '+str(step)+': Updating the mean...')\n\n regression = RiemannianRegression(self._M, mean_Y, t, self._degree, verbosity=2)\n bet_mean = regression.trend\n\n # update discrete paths\n if verbosity == 2:\n print('Step '+str(step)+': Updating discrete paths...')\n start = time.time()\n F = legs(bet_mean)\n\n if verbosity == 2:\n end = time.time()\n print('...took ' + \"{:.2f}\".format(end - start) + ' seconds to update the legs.')\n\n print('Evaluating energy...')\n Enew = loss(F)\n\n # check for divergence\n if Enew > Eold:\n print('Stopped because the energy increased.')\n finish_mean = time.time()\n print('Computing the mean took ' + \"{:.2f}\".format(finish_mean - begin_mean) + ' seconds.')\n return old_mean, F_old\n\n # compute step size\n step_size = 0\n for i, p in enumerate(bet_mean.control_points[0]):\n step_size += self._M.metric.dist(p, old_mean.control_points[0][i]) ** 2\n stepsize = np.sqrt(step_size)\n\n if verbosity > 0:\n print('Mean-Comp-Step', step, 'Energy:', Enew, 'Enew - Eold:', Enew - Eold)\n\n finish_mean = time.time()\n print('Computing the mean took ' + \"{:.2f}\".format(finish_mean - begin_mean) + '.')\n\n return bet_mean, F\n\n def gram(self, B, B_mean=None, F=None, n=5, delta=1e-5, min_stepSize=1e-10, nsteps=20, eps=1e-5, n_stepsGeo=10,\n verbosity=2):\n \"\"\"Approximates the Gram matrix for a curve data set\n\n :param B: list of Bézier splines\n :param B_mean: mean of curves in B\n :param F: discrete spider, i.e, discrete paths from mean to data\n :param n: see mean method\n :param delta: see mean method\n :param min_stepSize: see mean method\n :param nsteps: see mean method\n :param eps: see mean method\n :param n_stepsGeo: see mean method\n :param verbosity: see mean method\n :return G: Gram matrix\n :return bet_mean: mean curve of data curves\n :return F: discrete geodesics from mean to data curves\n \"\"\"\n\n if B_mean is None:\n B_mean, F = self.mean(B, n=n, delta=delta, 
min_stepsize=min_stepSize, nsteps=nsteps, eps=eps,\n n_stepsGeo=n_stepsGeo, verbosity=verbosity)\n\n if verbosity == 2:\n print('Computing Gram matrix...')\n\n n = len(F)\n G = np.zeros((n, n))\n for i, si in enumerate(F):\n for j, sj in enumerate(F[i:], start=i):\n G[i, j] = n ** 2 / (2 * n) * (self.dist(B_mean, si[1], l=1) ** 2 + self.dist(B_mean, sj[1], l=1) ** 2\n - self.dist(si[1], sj[1], l=1) ** 2)\n G[j, i] = G[i, j]\n\n return G, B_mean, F\n\n def projToGeodesic(self, R, Q, P, max_iter=10):\n # TODO\n return\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.isnan", "scipy.integrate.simps", "numpy.zeros_like", "numpy.zeros", "numpy.sum", "numpy.sqrt", "numpy.abs", "numpy.linspace" ] ]
jyericlin/VBC
[ "cc34169e4f4ece500ad8c33ab69378f0a700a73e" ]
[ "src/learners/q_learner_6h_vs_8z_vbc.py" ]
[ "import copy\nfrom components.episode_buffer import EpisodeBatch\nfrom modules.mixers.vdn import VDNMixer\nfrom modules.mixers.qmix import QMixer\nimport torch as th\nimport numpy as np\nfrom torch.optim import RMSprop\n\n# learning for 6h_vs_8z scenario\nclass QLearner_6h_vs_8z:\n def __init__(self, mac, scheme, logger, args):\n self.args = args\n self.mac = mac\n self.logger = logger\n self.regularization_const = self.args.normalization_const\n self.params = list(mac.parameters())\n\n self.last_target_update_episode = 0\n\n self.mixer = None\n if args.mixer is not None:\n if args.mixer == \"vdn\":\n self.mixer = VDNMixer()\n elif args.mixer == \"qmix\":\n self.mixer = QMixer(args)\n else:\n raise ValueError(\"Mixer {} not recognised.\".format(args.mixer))\n self.params += list(self.mixer.parameters())\n self.target_mixer = copy.deepcopy(self.mixer)\n\n self.params += list(self.mac.env_blender.parameters())\n self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)\n self.target_mac = copy.deepcopy(mac)\n\n self.log_stats_t = -self.args.learner_log_interval - 1\n\n def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):\n # Get the relevant quantities\n rewards = batch[\"reward\"][:, :-1]\n actions = batch[\"actions\"][:, :-1]\n terminated = batch[\"terminated\"][:, :-1].float()\n mask = batch[\"filled\"][:, :-1].float()\n mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])\n avail_actions = batch[\"avail_actions\"]\n\n # Calculate estimated Q-Values\n mac_out = []\n difference_out = []\n difference_out1 = [] \n self.mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n agent_local_outputs, hidden_states = self.mac.forward(batch, t=t) \n dummy0 = self.mac.env_blender(hidden_states[:,0,:].view(32,-1)) \n dummy1 = self.mac.env_blender(hidden_states[:,1,:].view(32,-1)) \n dummy2 = self.mac.env_blender(hidden_states[:,2,:].view(32,-1)) \n dummy3 = self.mac.env_blender(hidden_states[:,3,:].view(32,-1)) \n dummy4 = self.mac.env_blender(hidden_states[:,4,:].view(32,-1))\n dummy5 = self.mac.env_blender(hidden_states[:,5,:].view(32,-1)) \n \n agent0 = (dummy1 + dummy2 + dummy3 + dummy4 + dummy5)/5.0\n agent1 = (dummy0 + dummy2 + dummy3 + dummy4 + dummy5)/5.0\n agent2 = (dummy0 + dummy1 + dummy3 + dummy4 + dummy5)/5.0\n agent3 = (dummy0 + dummy1 + dummy2 + dummy4 + dummy5)/5.0\n agent4 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy5)/5.0\n agent5 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy4)/5.0\n agent_global_outputs =th.cat((agent0.view((32,1,14)),agent1.view((32,1,14)),agent2.view((32,1,14)),agent3.view((32,1,14)),agent4.view((32,1,14)),agent5.view((32,1,14))),1) \n agent_outs = agent_local_outputs + agent_global_outputs\n difference = agent_global_outputs \n mac_out.append(agent_outs)\n difference_out.append(difference)\n \n mac_out = th.stack(mac_out, dim=1) # Concat over time\n difference_out = th.stack(difference_out, dim=1) # Concat over time\n difference_out = th.std(difference_out,dim = 3).sum()\n # Pick the Q-Values for the actions taken by each agent\n chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim\n avg_difference = (difference_out.sum())/((agent_outs.shape[0]*agent_outs.shape[1]*agent_outs.shape[2]*batch.max_seq_length))\n # Calculate the Q-Values necessary for the target\n target_mac_out = []\n \n self.target_mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n target_agent_local_outputs, target_hidden_states = 
self.target_mac.forward(batch, t=t)\n \n dummy0 = self.mac.env_blender(target_hidden_states[:,0,:].view(32,-1)) \n dummy1 = self.mac.env_blender(target_hidden_states[:,1,:].view(32,-1)) \n dummy2 = self.mac.env_blender(target_hidden_states[:,2,:].view(32,-1)) \n dummy3 = self.mac.env_blender(target_hidden_states[:,3,:].view(32,-1)) \n dummy4 = self.mac.env_blender(target_hidden_states[:,4,:].view(32,-1))\n dummy5 = self.mac.env_blender(target_hidden_states[:,5,:].view(32,-1)) \n \n target_agent0 = (dummy1 + dummy2 + dummy3 + dummy4 + dummy5)/5.0\n target_agent1 = (dummy0 + dummy2 + dummy3 + dummy4 + dummy5)/5.0\n target_agent2 = (dummy0 + dummy1 + dummy3 + dummy4 + dummy5)/5.0\n target_agent3 = (dummy0 + dummy1 + dummy2 + dummy4 + dummy5)/5.0\n target_agent4 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy5)/5.0\n target_agent5 = (dummy0 + dummy1 + dummy2 + dummy3 + dummy4)/5.0 \n \n target_agent_global_outputs = th.cat((target_agent0.view((32,1,14)),target_agent1.view((32,1,14)),target_agent2.view((32,1,14)),target_agent3.view((32,1,14)),target_agent4.view((32,1,14)),target_agent5.view((32,1,14))),1)\n target_agent_outs = target_agent_local_outputs + target_agent_global_outputs\n target_mac_out.append(target_agent_outs)\n \n # We don't need the first timesteps Q-Value estimate for calculating targets\n target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time\n\n # Mask out unavailable actions\n target_mac_out[avail_actions[:, 1:] == 0] = -9999999\n\n # Max over target Q-Values\n if self.args.double_q:\n # Get actions that maximise live Q (for double q-learning)\n mac_out[avail_actions == 0] = -9999999\n cur_max_actions = mac_out[:, 1:].max(dim=3, keepdim=True)[1]\n target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)\n else:\n target_max_qvals = target_mac_out.max(dim=3)[0]\n\n # Mix\n if self.mixer is not None:\n chosen_action_qvals = self.mixer(chosen_action_qvals, batch[\"state\"][:, :-1])\n target_max_qvals = self.target_mixer(target_max_qvals, batch[\"state\"][:, 1:])\n\n # Calculate 1-step Q-Learning targets\n targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals\n\n # Td-error\n td_error = (chosen_action_qvals - targets.detach())\n\n mask = mask.expand_as(td_error)\n\n # 0-out the targets that came from padded data\n masked_td_error = td_error * mask\n\n # Normal L2 loss, take mean over actual data\n loss = (masked_td_error ** 2).sum() / mask.sum() + self.args.normalization_const * avg_difference\n\n # Optimise\n self.optimiser.zero_grad()\n loss.backward()\n grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)\n self.optimiser.step()\n\n if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:\n self._update_targets()\n self.last_target_update_episode = episode_num\n\n if t_env - self.log_stats_t >= self.args.learner_log_interval:\n self.logger.log_stat(\"loss\", loss.item(), t_env)\n self.logger.log_stat(\"grad_norm\", grad_norm, t_env)\n mask_elems = mask.sum().item()\n self.logger.log_stat(\"td_error_abs\", (masked_td_error.abs().sum().item()/mask_elems), t_env)\n self.logger.log_stat(\"q_taken_mean\", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)\n self.logger.log_stat(\"target_mean\", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)\n self.log_stats_t = t_env\n\n def _update_targets(self):\n self.target_mac.load_state(self.mac)\n if self.mixer is not None:\n 
self.target_mixer.load_state_dict(self.mixer.state_dict())\n self.logger.console_logger.info(\"Updated target network\")\n\n def cuda(self):\n self.mac.cuda()\n self.target_mac.cuda()\n if self.mixer is not None:\n self.mixer.cuda()\n self.target_mixer.cuda()\n\n def save_models(self, path):\n self.mac.save_models(path)\n if self.mixer is not None:\n th.save(self.mixer.state_dict(), \"{}/mixer.th\".format(path))\n th.save(self.optimiser.state_dict(), \"{}/opt.th\".format(path))\n\n def load_models(self, path):\n self.mac.load_models(path)\n # Not quite right but I don't want to save target networks\n self.target_mac.load_models(path)\n if self.mixer is not None:\n self.mixer.load_state_dict(th.load(\"{}/mixer.th\".format(path), map_location=lambda storage, loc: storage))\n self.optimiser.load_state_dict(th.load(\"{}/opt.th\".format(path), map_location=lambda storage, loc: storage))\n\n\n\n" ]
[ [ "torch.stack", "torch.optim.RMSprop", "torch.gather", "torch.nn.utils.clip_grad_norm_", "torch.std" ] ]
choderalab/fragmenter_examples
[ "01d63aea340e91f8cbb3a21253a906a0c3c66da3", "01d63aea340e91f8cbb3a21253a906a0c3c66da3", "01d63aea340e91f8cbb3a21253a906a0c3c66da3" ]
[ "wbo-manuscript-figures/proof_of_concept/generate_figures_coverage.py", "combinatorial_fragmentation/fragment_bond_orders/compute_oe_wbo_parent.py", "wbo-manuscript-figures/figure-12/Tedizolid_phosphate_0/generate_figure.py" ]
[ "import json\nimport seaborn as sbn\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.colors as mcolors\nimport pandas as pd\nimport arch.bootstrap\nimport math\nimport qcfractal.interface as ptl\nfrom fragmenter.utils import HARTREE_2_KJMOL\nfrom fragmenter import chemi\nfrom openeye import oedepict, oechem, oegraphsim\nfrom openforcefield.topology import Molecule, Topology\nfrom openforcefield.typing.engines.smirnoff import ForceField\nimport pickle\n\ndef checkTorsion(smiles, torsion_indices, ff_name):\n \"\"\"\n Take mollist and check if the molecules in a list match a specific torsion id\n\n Parameters\n ----------\n molList : List of objects\n List of oemols with datatags generated in genData function\n\n Returns\n -------\n molList : list of objects\n List of oemol objects that have a datatag \"IDMatch\" that contain the torsion id\n involved in the QCA torsion drive\n \"\"\"\n\n matches = []\n count = 0\n mols = []\n #tid=''\n #molecule = Molecule.from_mapped_smiles(smiles)\n print(smiles)\n from openeye import oechem\n # create a new molecule\n #mol = oechem.OEGraphMol()\n # convert the SMILES string into a molecule\n #oechem.OESmilesToMol(mol,smiles)\n #molecule = Molecule.from_smiles(smiles)\n #molecule=Molecule.from_openeye(mol)\n\n molecule = Molecule.from_mapped_smiles(smiles)\n topology = Topology.from_molecules(molecule)\n # Let's label using the Parsley force field\n forcefield = ForceField(ff_name, allow_cosmetic_attributes=True)\n # Run the molecule labeling\n molecule_force_list = forcefield.label_molecules(topology)\n params = []\n indices=[]\n # Print out a formatted description of the torsion parameters applied to this molecule\n for mol_idx, mol_forces in enumerate(molecule_force_list):\n # print(f'Forces for molecule {mol_idx}')\n for force_tag, force_dict in mol_forces.items():\n if force_tag == \"ProperTorsions\":\n for (atom_indices, parameter) in force_dict.items():\n params.append(parameter.id)\n indices.append(atom_indices)\n #torsion_indices=tuple(torsion_indices)\n #print(type(torsion_indices))\n print(torsion_indices)\n #print(type(atom_indices))\n print(atom_indices)\n if atom_indices == torsion_indices or tuple(\n reversed(atom_indices)\n ) == torsion_indices:\n #mol.SetData(\"IDMatch\", parameter.id)\n tid=parameter.id\n print(params)\n print(indices)\n return tid\n\n\nclient = ptl.FractalClient()\n# from the TorsionDriveDataset collection picking up given datasetName\nds = client.get_collection(\"TorsionDriveDataset\", 'OpenFF Substituted Phenyl Set 1')\n\ndef testQuery(smiles):\n #print(ds.get_entry(smiles))\n #print(dir(ds.get_entry(smiles)))\n dih=ds.get_entry(smiles).dict()['td_keywords']['dihedrals'][0]\n print(dih)\n mapped_smiles = ds.get_entry(smiles).attributes[\"canonical_isomeric_explicit_hydrogen_mapped_smiles\"]\n #print(mapped_smiles)\n return mapped_smiles, dih\n\n\ndef biphenyl(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n for key, item in data.items():\n testQuery(key)\nbiphenyl('biphenyls_set_input.json')\n\n\ncolor_keys= ['maroon', 'brown', 'indianred', 'red', 'coral','orange', 'gold', 'darkkhaki','yellowgreen','limegreen',\n 'mediumseagreen', 'teal', 'steelblue', 'cornflowerblue', 'royalblue', 'darkblue',\n 'mediumblue', 'slateblue', 'blueviolet', 'purple','mediumvioletred', 'deeppink', 'hotpink',\n 'palevioletred', 'pink', 'lightpink']\n\n\n\ncolor_keys2=['darkblue',\n 'mediumblue', 'slateblue', 'blueviolet', 'purple','mediumvioletred', 'deeppink', 
'hotpink',\n 'cornflowerblue', 'pink', 'lightpink']\n\ncolor_keys2=['teal', 'hotpink', 'purple', 'gold', 'orange', 'slateblue', 'darkkhaki', 'lightpink', 'purple', 'hotpink']\n\nfgroup_symbols_colors = {\n #'phenoxide': 'C[O-]',\n 'dimethylamino': (r'$\\mathrm{\\mathsf{N(Me)_2}}$', color_keys[0]),\n 'methylamino': (r'$\\mathrm{\\mathsf{NHMe}}$', color_keys[1]),\n 'amino': (r'$\\mathrm{\\mathsf{NH_2}}$', color_keys[2]),\n 'ethylamino': (r'$\\mathrm{\\mathsf{NHEt}}$', color_keys[3]),\n 'propylamino': (r'$\\mathrm{\\mathsf{NH(C_3H_7)}}$', color_keys[4]),\n 'hydroxy': (r'$\\mathrm{\\mathsf{OH}}$', color_keys[5]),\n 'methoxy': (r'$\\mathrm{\\mathsf{OMe}}$', color_keys[6]),\n 'ethoxy': (r'$\\mathrm{\\mathsf{OEt}}$', color_keys[7]),\n 'dimethylurea': (r'$\\mathrm{\\mathsf{NHCON(Me)_2}}$', color_keys[8]),\n 'urea': (r'$\\mathrm{\\mathsf{NHCONHMe}}$', color_keys[9]),\n 'phenylurea': (r'$\\mathrm{\\mathsf{NHCONH_2}}$', color_keys[10]),\n 'ethylamide': (r'$\\mathrm{\\mathsf{NHCOEt}}$', color_keys[11]),\n 'amide': (r'$\\mathrm{\\mathsf{NHCOMe}}$', color_keys[12]),\n 'fluoro': (r'$\\mathrm{\\mathsf{F}}$', color_keys[13]),\n 'chloro': (r'$\\mathrm{\\mathsf{Cl}}$', color_keys[14]),\n 'cyano': (r'$\\mathrm{\\mathsf{CN}}$', color_keys[15]),\n 'methyl': (r'$\\mathrm{\\mathsf{Me}}$', color_keys[16]),\n 'bromo': (r'$\\mathrm{\\mathsf{Br}}$', color_keys[17]),\n 'carbamate': (r'$\\mathrm{\\mathsf{OCONH_2}}$', color_keys[18]),\n 'benzoicacid': (r'$\\mathrm{\\mathsf{COOH}}$', color_keys[19]),\n 'iodo': (r'$\\mathrm{\\mathsf{I}}$', color_keys[20]),\n 'ethoxycarbonyl': (r'$\\mathrm{\\mathsf{COOEt}}$', color_keys[21]),\n 'trimethylamonium': (r'$\\mathrm{\\mathsf{N(Me)_3^+}}$', color_keys[22]),\n 'trifluoromethyl': (r'$\\mathrm{\\mathsf{CF_3}}$', color_keys[23]),\n 'nitro': (r'$\\mathrm{\\mathsf{NO_2}}$', color_keys[24])\n}\n\n\n\n# Generate joy plot\nfgroup_wbos = {}\nfor fgroup in fgroup_symbols_colors:\n if fgroup not in fgroup_wbos:\n fgroup_wbos[fgroup] = []\n with open('../../phenyl_benchmark/data/{}_R1_wbos.json'.format(fgroup), 'r') as f:\n wbos = json.load(f)\n for w in wbos:\n fgroup_wbos[fgroup].append(w[0])\n\ncolors = mcolors.CSS4_COLORS\n\nfig, axes = plt.subplots(len(fgroup_wbos))\nfor i, fgroup in enumerate(fgroup_wbos):\n ax = plt.subplot(len(fgroup_wbos), 1, i+1)\n ax.spines['left'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.patch.set_facecolor('none')\n sbn.kdeplot(fgroup_wbos[fgroup], shade=True, alpha=0.6,\n color=colors[fgroup_symbols_colors[fgroup][1]])\n sbn.kdeplot(fgroup_wbos[fgroup], shade=False, color='black', lw=0.8)\n plt.xlim(0.70, 1.4)\n plt.yticks([])\n ax.yaxis.set_label_coords(-0.05, 0)\n plt.ylabel(fgroup_symbols_colors[fgroup][0], rotation=0, size=10,\n color=colors[fgroup_symbols_colors[fgroup][1]])\n if i == len(fgroup_wbos)-1:\n plt.xlabel('AM1 ELF10 Wiberg bond order', fontsize=14)\n plt.xticks(fontsize=14)\n else:\n plt.xticks([])\n\noverlap=1.0\nh_pad = 5 + (- 5*(1 + overlap))\nfig.tight_layout(h_pad=h_pad)\nplt.savefig('figures/wbo_dist_joy_plot.pdf')\n\n\n# See if there is a correlation with Hammet sigma parameters. 
Values were taken from\n# doi:10.1021/cr00002a004\nsubs = ['H','dimethylamino', 'methylamino', 'amino', 'ethylamino', 'hydroxy', 'methoxy', 'phenylurea', 'amide',\n 'fluoro', 'chloro','cyano', 'methyl', 'bromo', 'benzoicacid', 'ethoxycarbonyl', 'trifluoromethyl', 'nitro']\nsigma_m = [0.0, -0.16, -0.21, -0.16, -0.24, 0.12, 0.12, -0.02, 0.21, 0.34, 0.37, 0.56, -0.07, 0.39, 0.37, 0.37, 0.43, 0.71]\nsigma_p = [0.0, -0.83, -0.70, -0.66, -0.61, -0.37, -0.27, -0.24, 0.0, 0.06, 0.23, 0.66, -0.17, 0.45, 0.45, 0.45, 0.54, 0.78]\nwbo_cooh_meta = [0.96, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.96, 0.96, 0.95, 0.95, 0.95, 0.96, 0.95, 0.96, 0.95, 0.95, 95]\nwbo_cooh_para = [0.96, 0.97, 0.97, 0.97, 0.97, 0.96, 0.96, 0.97, 0.97, 0.96, 0.96, 0.96, 0.96, 0.96, 0.95, 0.95, 0.95, 95]\nwbo_r_meta = [0.96, 1.07, 1.08, 1.12, 1.08, 1.06, 1.04, 1.02, 1.02, 1.02, 1.0, 1.0, 1.0, 0.99, 0.96, 0.93, 0.91, 0.85]\nwbo_r_para = [0.96, 1.11, 1.10, 1.12, 1.14, 1.08, 1.05, 1.04, 1.03, 1.03, 1.01, 1.0, 1.0, 0.99, 0.95, 0.93, 0.91, 0.85]\n\nhammet_sigmas = {'substituent':subs, 'sigma_p': sigma_p, 'sigma_m': sigma_m, 'wbo_cooh_meta': wbo_cooh_meta,\n 'wbo_cooh_para': wbo_cooh_para,'wbo_r_meta': wbo_r_meta, 'wbo_r_para': wbo_r_para}\ndf = pd.DataFrame(hammet_sigmas)\n\n# plot correlation\nmarkersize=9\nfontsize=8\nfor sigma in ('m', 'p'):\n fig, ax = plt.subplots()\n for row in df.iterrows():\n if sigma == 'm':\n x = row[1].wbo_r_meta\n y = row[1].sigma_m\n if sigma == 'p':\n x = row[1].wbo_r_para\n y = row[1].sigma_p\n if row[1].substituent == 'H':\n plt.plot(x, y, '.', color='black', markersize=markersize, label='H')\n plt.annotate('H', (x, y),\n textcoords='offset points', xytext=(3, 2), color='black', fontsize=fontsize)\n continue\n plt.plot(x, y, '.', markersize=markersize, color=fgroup_symbols_colors[row[1].substituent][1],\n label=fgroup_symbols_colors[row[1].substituent][0])\n plt.annotate(fgroup_symbols_colors[row[1].substituent][0], (x, y),\n textcoords='offset points', xytext=(3, 2), color= fgroup_symbols_colors[row[1].substituent][1], fontsize=fontsize)\n\n plt.xlim(0.83, 1.16)\n plt.ylim(-0.86, 0.85)\n plt.ylabel(r'$\\sigma_{}$'.format(sigma), fontsize=14)\n plt.xlabel('AM1 ELF10 Wiberg Bond Order', fontsize=14);\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n if sigma == 'm':\n r_value = df.corr().sigma_m.wbo_r_meta\n if sigma == 'p':\n r_value = df.corr().sigma_p.wbo_r_para\n #print(r_value)\n textstr = r'$\\rho =%.2f$' % (r_value)\n props = dict(boxstyle='square', facecolor='white', alpha=0.5)\n ax.text(0.75, 0.95, textstr, transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n plt.tight_layout()\n fig.savefig('figures/hammett_sigma_{}.pdf'.format(sigma))\n\n\n# Generate torsion barrier height vs ELF10 AM1 WBO plot\nwith open('../../phenyl_benchmark/data/qcarchive_torsiondrives.json', 'r') as f:\n fgroups_td = json.load(f)\n\n# Generate 2 plots. 
One for good lines and one for lines that have issues\nplot_1 = ['dimethylamino', 'methylamino', 'ethylamino', 'propylamino', 'hydroxy', 'methoxy', 'phenylurea', 'benzoicacid', 'nitro']\nplot_2 = ['amino', 'ethoxy', 'dimethylurea', 'urea', 'ethylamide', 'amide', 'carbamate', 'ethoxycarbonyl']\nsymbols = ['o', 'P', '^', '*', 's', 'p', 'X', 'd', 'H', '>']\n\nboth_plots=plot_1 + plot_2\n\ndef r_value_ci(am1_wbos, max_energies):\n return stats.linregress(am1_wbos, max_energies)[2]**2\n\nfontsize = 14\nfig, ax = plt.subplots()\ncolors = []\nr_values = []\nfor i, fgroup in enumerate(plot_1):\n if fgroup not in fgroups_td:\n print(fgroup)\n continue\n energies = fgroups_td[fgroup]['energy']\n am1_wbos = fgroups_td[fgroup]['elf10_am1_wbo']\n max_energies = [max(energy) for energy in energies]\n slope, intercept, r_value, p_value, std_err = stats.linregress(am1_wbos, max_energies)\n r_ci = arch.bootstrap.IIDBootstrap(np.asarray(am1_wbos), np.asarray(max_energies)).conf_int(r_value_ci, 1000, method='percentile')\n #print(r_ci)\n fgroups_td[fgroup]['stats'] = [slope, std_err, r_value**2, r_ci[0][0], r_ci[1][0]]\n plt.plot(np.unique(am1_wbos), np.poly1d([slope, intercept])(np.unique(am1_wbos)), fgroup_symbols_colors[fgroup][1])\n plt.scatter(x=am1_wbos, y=max_energies, color=fgroup_symbols_colors[fgroup][1], marker=symbols[i], label=fgroup_symbols_colors[fgroup][0])\n colors.append(fgroup_symbols_colors[fgroup][1])\n r_values.append([r_value**2, r_ci[0][0], r_ci[1][0]])\n\nl = ax.legend(bbox_to_anchor=(1, 1), fontsize=fontsize)\nfor i, text in enumerate(l.get_texts()):\n text.set_color(colors[i])\n\nplt.xlabel('AM1 ELF10 Wiberg bond order', fontsize=fontsize)\nplt.ylabel('Torsion barrier height (kJ/mol)', fontsize=fontsize)\nplt.xlim(0.8, 1.3)\nplt.ylim(0, 50)\nplt.xticks(fontsize=fontsize)\nplt.yticks(fontsize=fontsize)\nplt.tight_layout()\nplt.savefig('figures/energy_vs_wbo_1.pdf')\n\ncolors = []\nig, ax = plt.subplots()\ntig_dict={'TIG1':[[],[]], 'TIG2':[[],[]], 'TIG3':[[],[]], 'TIG4':[[],[]], 'TIG5':[[],[]], 'TIG6':[[],[]], 'TIG7':[[],[]], 'TIG8':[[],[]], 'TIG9':[[],[]], 'TIG10':[[],[]]}\nmolDict={}\n\"\"\"\nfor i, fgroup in enumerate(both_plots):\n if fgroup not in fgroups_td:\n continue\n print(i)\n print(fgroup)\n energies = fgroups_td[fgroup]['energy']\n am1_wbos = fgroups_td[fgroup]['elf10_am1_wbo']\n max_energies = [max(energy) for energy in energies]\n molcount=0\n torsions=[]\n for i, smiles in enumerate(fgroups_td[fgroup]['indices']):\n molDict[smiles]=[am1_wbos[i], max_energies[i]]\n molcount+=1\n #testQuery(smiles)\n #with open('../../phenyl_benchmark/data/{}_td_job_indices.json'.format(fgroup), 'r') as f:\n #/Users/jessica/Documents/Grad_research/fragmenter_data/wbo-manuscript-figures/proof_of_concept/data/data\n with open('data/data/{}_td_job_indices.json'.format(fgroup), 'r') as f:\n indices = json.load(f)\n for m in indices:\n if m[0] == smiles:\n molDict[smiles].extend([m[1], m[4]])\n for sm, dd in molDict.items():\n print(dd)\n smiles, dih=testQuery(sm)\n ff='tig_proof_of_concept_1.3.0.offxml'\n tid = checkTorsion(smiles, dih, ff)\n torsions.append(tid)\n tig_dict[tid][0].append(dd[0])\n tig_dict[tid][1].append(dd[1])\n print(molcount)\n print(tig_dict)\n print(torsions)\n print(len(torsions))\n with open('biphenyl_data.pickle', 'rb') as handle:\n b = pickle.load(handle)\n for key, item in b.items():\n smiles, dih=testQuery(key)\n tid = checkTorsion(smiles, item[2], ff)\n tig_dict[tid][0].append(item[0])\n tig_dict[tid][1].append(item[1])\n\n import pickle\n with 
open(\"wbotb.pkl\", \"wb\") as f:\n pickle.dump(tig_dict, f)\n\"\"\"\ndef makeCovPlot(filename):\n with open(filename, \"rb\") as f:\n plotdata = pickle.load(f)\n #print(plotdata)\n count=0\n colors=[]\n tid_td={}\n for key, data in plotdata.items():\n am1_wbos=data[0]\n max_energies=data[1]\n if am1_wbos==[]:\n continue\n #print(am1_wbos)\n #print(max_energies)\n\n slope, intercept, r_value, p_value, std_err = stats.linregress(am1_wbos, max_energies)\n r_ci = arch.bootstrap.IIDBootstrap(np.asarray(am1_wbos), np.asarray(max_energies)).conf_int(r_value_ci, 10000, method='percentile')\n #print(r_ci)\n fgroups_td[fgroup]['stats'] = [slope, std_err, r_value**2, r_ci[0][0], r_ci[1][0]]\n tid_td[key] = [slope, std_err, r_value**2, r_ci[0][0], r_ci[1][0]]\n plt.plot(np.unique(am1_wbos), np.poly1d([slope, intercept])(np.unique(am1_wbos)), color_keys2[count])\n plt.scatter(x=am1_wbos, y=max_energies, color=color_keys2[count], marker=symbols[count], label=key)\n colors.append(color_keys2[count])\n count+=1\n #store statistics from the td vs wbo plot for table generation\n with open(\"table_data.pkl\", \"wb\") as f:\n pickle.dump(tid_td, f)\n\n l = ax.legend(bbox_to_anchor=(1, 1), fontsize=fontsize)\n for i, text in enumerate(l.get_texts()):\n text.set_color(colors[i])\n\n plt.xlabel('AM1 ELF10 Wiberg bond order', fontsize=fontsize)\n plt.ylabel('Torsion barrier height (kJ/mol)', fontsize=fontsize)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n #plt.xlim(0.8, 1.5)\n #plt.ylim(0, 100)\n plt.tight_layout()\n plt.savefig('energy_vs_wbo_full_newcolors.pdf')\n\nmakeCovPlot('wbotb.pkl')\n\n\n# generate table\nstats_table = {'Parameter': [], 'smarts':[], 'slope': [],'standard error': [], 'r^2': [], 'CI_1': [], 'CI_2': []}\n#[slope, std_err, r_value**2, r_ci[0][0], r_ci[1][0]]\n\nwith open('table_data.pkl', 'rb') as f:\n tabledata = pickle.load(f)\nsmartsDict={\n 'TIG1':'[*:1]~[#6X3:2]-[#6X3:3]~[*:4]',\n 'TIG2':'[*:1]~[#6X3:2]-[#6X3$(*=[#8,#16,#7]):3]~[*:4]',\n 'TIG3':'[*:1]~[#6X3:2]-[#6X3:3](-[#8H1])=[#8X1:4]',\n 'TIG4':'[*:1]~[#7X3:2]-!@[#6X3:3]~@[#6:4]',\n 'TIG5':'[#6X3:1]~[#7X3:2]-!@[#6X3:3]~@[#6:4]',\n 'TIG6':'[#6X3$(*~[#6]):1]~[#7X3:2]-!@[#6X3:3]~@[#6:4]',\n 'TIG7':'[#6X4:1]~[#7X3:2]-!@[#6X3:3]~@[#6:4]',\n 'TIG8':'[#8X1:1]~[#7X3:2]~[#6X3:3]~[*:4]',\n 'TIG9':'[*:1]~[#6X3:2]-[#8X2:3]-[*:4]',\n 'TIG10':'[*:1]~[#6X3:2]-[#8X2:3]-[#1:4]'\n }\nfor key, item in tabledata.items():\n stats_table['Parameter'].append(key)\n stats_table['smarts'].append(smartsDict[key])\n stats_table['slope'].append(round(item[0],2))\n stats_table['standard error'].append(round(item[1],2))\n stats_table['r^2'].append(round(item[2],2))\n stats_table['CI_1'].append(round(item[3], 2))\n stats_table['CI_2'].append(round(item[4], 2))\nlatex_table = pd.DataFrame(stats_table).to_latex(index=False)\nwith open('figures/stats_tid.tex', 'w') as f:\n f.write(latex_table)\n\n", "import fragmenter\nimport json\nimport cmiles\nfrom openeye import oechem, oequacpac\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\nimport seaborn as sbn\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\"calculate OE WBO\")\n parser.add_argument('-n', '--name', type=str, help='Molecule name')\n args = parser.parse_args()\n name = args.name\n\n with open('../filter/filtered_kinase_inhibitors.json', 'r') as f:\n kinase_inhibitors = json.load(f)\n kinase_inhibitors[name] = kinase_inhibitors[name]\n\n mapped_smiles = 
kinase_inhibitors[name]['canonical_isomeric_explicit_hydrogen_mapped_smiles']\n oemol = cmiles.utils.load_molecule(mapped_smiles, toolkit='openeye')\n charged = fragmenter.chemi.get_charges(oemol, keep_confs=-1)\n\n oe_wbo_full = {}\n for bond in charged.GetBonds():\n bond_key = (bond.GetBgn().GetMapIdx(), bond.GetEnd().GetMapIdx())\n oe_wbo_full[bond_key] = {'ensamble': bond.GetData('WibergBondOrder')}\n oe_wbo_full[bond_key]['individual_conf'] = []\n\n for i, conf in enumerate(charged.GetConfs()):\n mol_copy = oechem.OEMol(conf)\n # Get WBO\n if oequacpac.OEAssignPartialCharges(mol_copy, oequacpac.OECharges_AM1BCCSym):\n for bond in mol_copy.GetBonds():\n bond_key = (bond.GetBgn().GetMapIdx(), bond.GetEnd().GetMapIdx())\n try:\n oe_wbo_full[bond_key]['individual_conf'].append(bond.GetData('WibergBondOrder'))\n except KeyError:\n if 0 in bond_key:\n oe_wbo_full[bond_key] = {'individual_conf': [bond.GetData('WibergBondOrder')]}\n else:\n reverse_key = tuple(reversed(bond_key))\n oe_wbo_full[reverse_key]['individual_conf'].append(bond.GetData('WibergBondOrder'))\n else:\n print('AM1BCC charging failed for {}, {}'.format(str(i), i))\n\n # serialize and save\n serialized = {}\n for bond in oe_wbo_full:\n key = fragmenter.workflow_api.serialize_key(bond)\n serialized[key] = oe_wbo_full[bond]\n # save file\n with open('{}/{}_parent_oe_wbo.json'.format(name, name), 'w') as f:\n json.dump(serialized, f, indent=2, sort_keys=True)\n\n # replot to include a red distribution for parent\n # load others, deserialize and replot\n with open('{}/{}_oe_wbo_by_bond.json'.format(name, name), 'r') as f:\n by_bond = json.load(f)\n frag_with_bond = {}\n for bond in by_bond:\n key = bond.split('[')[-1].split(']')[0].split(',')\n key = (int(key[0]), int(key[1]))\n frag_with_bond[key] = by_bond[bond]\n # add parent\n for bond in frag_with_bond:\n try:\n full = oe_wbo_full[bond]\n except KeyError:\n key = (bond[-1], bond[0])\n full = oe_wbo_full[key]\n frag_with_bond[bond]['parent'] = full\n # serialize and save\n\n serialized_with_parent = {}\n for bond in frag_with_bond:\n key =fragmenter. 
workflow_api.serialize_key(bond)\n serialized_with_parent[key] = frag_with_bond[bond]\n with open('{}/{}_oe_wbo_by_bond_with_parent.json'.format(name, name), 'w') as f:\n json.dump(serialized_with_parent, f, indent=2, sort_keys=True)\n\n # sort fragments by wbo\n sorted_frags = {}\n for b in frag_with_bond:\n list_1 = []\n list_2 = []\n for frag in frag_with_bond[b]:\n list_1.append(frag)\n list_2.append(frag_with_bond[b][frag]['ensamble'])\n sorted_frags[b] = [x for _,x in sorted(zip(list_2, list_1))]\n\n rot_bonds = list(frag_with_bond.keys())\n\n # plot results on one pdf page\n with PdfPages('{}/{}_fragment_bond_orders_with_parent.pdf'.format(name, name)) as pdf:\n for b in rot_bonds:\n #b = rot_bonds[3]\n n = len(frag_with_bond[b])\n\n fig, axes = plt.subplots(n, 1)\n fig.dpi = 400\n x_min = 3\n x_max = 0\n for f in frag_with_bond[b]:\n wbo = frag_with_bond[b][f]['individual_conf']\n if min(wbo) < x_min:\n x_min = min(wbo)\n if max(wbo) > x_max:\n x_max = max(wbo)\n\n for i, frag in enumerate(sorted_frags[b]):\n wbo = frag_with_bond[b][frag]['individual_conf']\n\n wbo_s = frag_with_bond[b][frag]['ensamble']\n ax = plt.subplot(n, 1, i+1)\n ax.spines['left'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.patch.set_facecolor('none')\n if frag == 'parent':\n sbn.kdeplot(wbo, shade= True, color='red', alpha=0.8)\n else:\n sbn.kdeplot(wbo, shade= True, alpha=0.8)\n sbn.distplot(wbo, hist=False, rug=True, kde=False, color='black')\n sbn.kdeplot(wbo, lw=1, color='black')\n plt.axvline(x=wbo_s, ymin=0, ymax=1, color='black', linewidth=0.5)\n\n plt.xlim(x_min-0.05, x_max+0.05)\n plt.yticks([])\n ax.yaxis.set_label_coords(-0.05, 0)\n plt.ylabel(i, rotation=0, size=8)\n if i != n-1:\n plt.xticks([])\n else:\n plt.xlabel('Bond order')\n if i == 0:\n plt.title('bond {}'.format(b))\n overlap=0.5\n h_pad = 5 + (- 5*(1 + overlap))\n fig.tight_layout(h_pad=h_pad)\n pdf.savefig(bbox_inches='tight')\n plt.close()\n\n for b in frag_with_bond:\n try:\n wbo = oe_wbo_full[b]['ensamble']\n except KeyError:\n wbo = oe_wbo_full[(b[-1], b[0])]['ensamble']\n fragmenter.chemi.highlight_bond_by_map_idx(mapped_smiles, [b], wbo=wbo, fname='{}/parent_bond_{}_{}.png'.format(name, b[0], b[1]))\n\n\n\n\n", "import fragmenter\nimport json\nfrom openeye import oechem, oequacpac, oedepict, oegraphsim\nimport matplotlib.pyplot as plt\nimport glob\nimport seaborn as sbn\nimport oenotebook as onb\nimport cmiles\nimport itertools\nimport numpy as np\n\ndef get_bond(mol, bond_tuple):\n a1 = mol.GetAtom(oechem.OEHasMapIdx(bond_tuple[0]))\n a2 = mol.GetAtom(oechem.OEHasMapIdx(bond_tuple[1]))\n if not a1 or not a2:\n print('no atoms')\n return False\n bond = mol.GetBond(a1, a2)\n if not bond:\n print('no bond')\n return False\n return bond\n\ndef visualize_mols(smiles, fname, rows, cols, bond_idx, wbos, colors, align_to=0):\n \"\"\"\n Visualize molecules with highlighted bond and labeled with WBO\n Parameters\n ----------\n smiles : list of SMILES to visualize.\n bond atoms should have map indices\n fname : str\n filename\n rows : int\n cols : int\n bond_idx : tuple of atom maps of bond to highlight.\n wbos : list of floats\n colors : list of hex values for colors\n align_to: int, optional, default 0\n index for which molecule to align to. 
If zero, will align to first molecules in SMILES list\n\n \"\"\"\n itf = oechem.OEInterface()\n\n ropts = oedepict.OEReportOptions(rows, cols)\n ropts.SetHeaderHeight(25)\n ropts.SetFooterHeight(25)\n ropts.SetCellGap(2)\n ropts.SetPageMargins(10)\n report = oedepict.OEReport(ropts)\n\n cellwidth, cellheight = report.GetCellWidth(), report.GetCellHeight()\n opts = oedepict.OE2DMolDisplayOptions(cellwidth, cellheight, oedepict.OEScale_AutoScale)\n oedepict.OESetup2DMolDisplayOptions(opts, itf)\n\n # align to chosen molecule\n ref_mol = oechem.OEGraphMol()\n oechem.OESmilesToMol(ref_mol, smiles[align_to])\n oedepict.OEPrepareDepiction(ref_mol)\n\n mols = []\n minscale = float(\"inf\")\n for s in smiles:\n mol = oechem.OEMol()\n oechem.OESmilesToMol(mol, s)\n mols.append(mol)\n oedepict.OEPrepareDepiction(mol, False, True)\n minscale = min(minscale, oedepict.OEGetMoleculeScale(mol, opts))\n print(minscale)\n\n print(minscale)\n opts.SetScale(minscale)\n for i, mol in enumerate(mols):\n\n cell = report.NewCell()\n oedepict.OEPrepareDepiction(mol, False, True)\n bond = get_bond(mol, bond_idx)\n atom_bond_set = oechem.OEAtomBondSet()\n atom_bond_set.AddAtoms([bond.GetBgn(), bond.GetEnd()])\n atom_bond_set.AddBond(bond)\n\n hstyle = oedepict.OEHighlightStyle_BallAndStick\n if i == 3:\n hcolor = oechem.OERed\n else:\n hcolor = oechem.OEColor(*colors[i])\n\n overlaps = oegraphsim.OEGetFPOverlap(ref_mol, mol, oegraphsim.OEGetFPType(oegraphsim.OEFPType_Tree))\n oedepict.OEPrepareMultiAlignedDepiction(mol, ref_mol, overlaps)\n\n disp = oedepict.OE2DMolDisplay(mol, opts)\n oedepict.OEAddHighlighting(disp, hcolor, hstyle, atom_bond_set)\n\n bond_label = oedepict.OEHighlightLabel(\"{:.2f}\".format((wbos[i])), hcolor)\n oedepict.OEAddLabel(disp, bond_label, atom_bond_set)\n oedepict.OERenderMolecule(cell, disp)\n # oedepict.OEDrawCurvedBorder(cell, oedepict.OELightGreyPen, 10.0)\n\n return (oedepict.OEWriteReport(fname, report))\n\ndef rbg_to_int(rbg, alpha):\n \"\"\"\n Convert rbg color to ints for openeye\n Parameters\n ----------\n rbg : list\n rbg\n alpha : int\n\n Returns\n -------\n list of ints\n\n \"\"\"\n rbg[-1] = int(rbg[-1]*alpha)\n colors = [int(round(i*255)) for i in rbg[:-1]]\n colors.append(int(rbg[-1]))\n return colors\n\nwith open('Tedizolid_phosphate_0_wbo_dists.json', 'r') as f:\n results = json.load(f)\nresults = results['[9, 23]']\nwith open('Tedizolid_phosphate_0_pfizer_wbo_dists.json', 'r') as f:\n pfizer_results = json.load(f)\n\nsbn.kdeplot(results['parent']['wbo_dist'], shade=True)\nsbn.distplot(results['parent']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[0])\nsbn.distplot(results['parent']['wbo_dist'], hist=False, color=sbn.color_palette()[0])\n\nsbn.kdeplot(results['0.05_path_length_False_None']['wbo_dist'], shade=True)\nsbn.distplot(results['0.05_path_length_False_None']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[1])\nsbn.distplot(results['0.05_path_length_False_None']['wbo_dist'], hist=False, color=sbn.color_palette()[1])\n\nsbn.kdeplot(results['0.1_path_length_False_None']['wbo_dist'], shade=True)\nsbn.distplot(results['0.1_path_length_False_None']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[2])\nsbn.distplot(results['0.1_path_length_False_None']['wbo_dist'], hist=False, color=sbn.color_palette()[2])\n\n\nsbn.kdeplot(pfizer_results['[9, 23]']['wbo_dist'], shade=True)\nsbn.distplot(pfizer_results['[9, 23]']['wbo_dist'], rug=True, hist=False, color=sbn.color_palette()[3])\nsbn.distplot(pfizer_results['[9, 23]']['wbo_dist'], 
hist=False, color=sbn.color_palette()[3])\nplt.xticks(fontsize=14)\nplt.yticks([])\nplt.xlabel('Wiberg Bond Order', fontsize=14)\nplt.tight_layout()\nplt.savefig('wbo_dists.pdf')\n\ncolors = [rbg_to_int(list(i), alpha=255) for i in sbn.color_palette()[:3]]\nwbos = [results['parent']['elf10_wbo'], results['0.05_path_length_False_None']['elf10_wbo'],\n results['0.1_path_length_False_None']['elf10_wbo'], pfizer_results['[9, 23]']['elf10_wbo']]\nfrags = [results['parent']['frag'], results['0.05_path_length_False_None']['frag'],\n results['0.1_path_length_False_None']['frag'], pfizer_results['[9, 23]']['frag']]\nvisualize_mols(frags, cols=2, rows=2, bond_idx=(9, 23), colors=colors, wbos=wbos, fname='fragments.pdf', align_to=0)" ]
[ [ "numpy.poly1d", "matplotlib.pyplot.annotate", "matplotlib.pyplot.xlim", "numpy.asarray", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.ylim", "scipy.stats.linregress", "matplotlib.pyplot.subplots", "matplotlib.pyplot.yticks", "matplotlib.pyplot.plot", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "numpy.unique", "matplotlib.pyplot.scatter", "matplotlib.pyplot.xticks" ], [ "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.yticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axvline", "matplotlib.pyplot.xticks", "matplotlib.pyplot.subplot" ], [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.xticks" ] ]
bennyrowland/suspect
[ "c09ab0a5013c5a199218214cdd791659243d7e41" ]
[ "suspect/processing/water_suppression.py" ]
[ "import numpy\n\nimport suspect.basis\n\n\ndef hsvd(data, rank, L=None):\n if L is None:\n L = data.np // 2\n # start by building the Hankel matrix\n hankel_matrix = numpy.zeros((L, data.np - L), \"complex\")\n for i in range(int(data.np - L)):\n hankel_matrix[:, i] = data[i:(i + L)]\n\n # perform the singular value decomposition\n U, s, V = numpy.linalg.svd(numpy.matrix(hankel_matrix))\n V = V.H # numpy returns the Hermitian conjugate of V\n\n # truncate the matrices to the given rank\n U_K = U[:, :rank]\n V_K = V[:, :rank]\n s_K = numpy.matrix(numpy.diag(s[:rank]))\n\n # because of the structure of the Hankel matrix, each row of U_K is the\n # result of multiplying the previous row by the delta t propagator matrix\n # Z' (a similar result holds for V as well). This gives us U_Kb * Z' = U_Kt\n # where U_Kb is U_K without the bottom row and U_Kt is U_K without the top\n # row.\n U_Kt = U_K[1:, :]\n U_Kb = U_K[:-1, :]\n # this gives us a set of linear equations which can be solved to find Z'.\n # Because of the noise in the system we solve with least-squares\n Zp = numpy.linalg.inv(U_Kb.H * U_Kb) * U_Kb.H * U_Kt\n\n # in the right basis, Zp is just the diagonal matrix describing the\n # evolution of each frequency component, by diagonalising the matrix we can\n # find that basis and get the z = exp((-damping + j*2pi * f) * dt) terms\n\n # alternatively we can just get the eigenvalues instead\n val, vec = numpy.linalg.eig(Zp)\n\n # the magnitude gives the damping and the angle gives the frequency\n damping_coeffs = numpy.zeros(rank)\n frequency_coeffs = numpy.zeros(rank)\n for i in range(rank):\n damping_coeffs[i] = - numpy.log(abs(val[i])) / data.dt\n frequency_coeffs[i] = numpy.angle(val[i]) / (data.dt * 2 * numpy.pi)\n\n # TODO in theory we can calculate the magnitude of each signal from the\n # RHS decomposition, linalg.inv(vec) * (S_K * V_K.H)[:, 0]\n\n # a simpler but more expensive way is to construct a basis set from the\n # known damping and frequency components and fit to the original data to\n # get the amplitudes and phase data\n X = numpy.zeros((data.np, rank), \"complex\")\n # TODO this should use the singlet fitting module to make the basis\n for i in range(rank):\n X[:, i] = suspect.basis.lorentzian(data.time_axis(),\n frequency_coeffs[i],\n 0,\n damping_coeffs[i] / numpy.pi) * data.np\n\n # we use the linear non-iterative least squares again\n U2, s2, V2 = numpy.linalg.svd(numpy.matrix(X), full_matrices=False)\n s2_inv = numpy.diag(1 / s2)\n beta = V2.H * s2_inv * U2.H * numpy.matrix(numpy.reshape(data, (data.np, 1)))\n\n components = []\n for i in range(rank):\n components.append({\n \"amplitude\": float(abs(beta[i])),\n \"phase\": float(numpy.angle(beta[i])),\n \"fwhm\": damping_coeffs[i] / numpy.pi,\n \"frequency\": frequency_coeffs[i]\n })\n\n return components\n\n\ndef construct_fid(components, time_axis):\n fid = numpy.zeros_like(time_axis, 'complex')\n for i in range(len(components)):\n lorentzian = suspect.basis.lorentzian(time_axis,\n components[i][\"frequency\"],\n components[i][\"phase\"],\n components[i][\"fwhm\"])\n fid += components[i][\"amplitude\"] * lorentzian * len(time_axis)\n return fid\n" ]
[ [ "numpy.matrix", "numpy.zeros_like", "numpy.angle", "numpy.reshape", "numpy.zeros", "numpy.linalg.inv", "numpy.linalg.eig", "numpy.diag" ] ]
yanndupis/tf-encrypted
[ "cfaea3ba87520f73979ed4e4f397eba3beb0a535", "cfaea3ba87520f73979ed4e4f397eba3beb0a535" ]
[ "examples/deprecated/inputs.py", "examples/federated-learning/run.py" ]
[ "import sys\n\nimport numpy as np\nimport tensorflow as tf\nimport tf_encrypted as tfe\n\nconfig = tfe.get_config()\n\nif len(sys.argv) > 1:\n\n #\n # assume we're running as a server\n #\n\n player_name = str(sys.argv[1])\n\n server = config.server(player_name)\n server.start()\n server.join()\n\nelse:\n\n #\n # assume we're running as master\n #\n\n def provide_weights() -> tf.Tensor:\n raw_w = np.array([5, 5, 5, 5]).reshape((2, 2))\n w = tf.constant(raw_w)\n tf.print(w, [w])\n return w\n\n def provide_input() -> tf.Tensor:\n x = tf.constant([1, 2, 3, 4], shape=(2, 2), dtype=tf.float32)\n tf.print(x, [x])\n return x\n\n def receive_output(prediction):\n\n tf.print([], [prediction], summarize=4)\n return []\n\n with tfe.protocol.Pond() as prot:\n\n # treat weights as private\n w = prot.define_private_input('model-provider', provide_weights)\n\n # load input for prediction\n x = prot.define_private_input('input-provider', provide_input)\n\n # compute prediction\n y = prot.matmul(x, w)\n\n # send output\n prediction_op = prot.define_output('input-provider', y, receive_output)\n\n with tfe.Session() as sess:\n sess.run(tf.global_variables_initializer(), tag='init')\n\n for _ in range(5):\n sess.run(prediction_op, tag='prediction')\n", "import sys\n\nimport tensorflow as tf\nimport tf_encrypted as tfe\n\nfrom convert import decode\n\nif len(sys.argv) > 1:\n # config file was specified\n config_file = sys.argv[1]\n config = tfe.RemoteConfig.load(config_file)\n tfe.set_config(config)\n tfe.set_protocol(tfe.protocol.Pond())\n\nsession_target = sys.argv[2] if len(sys.argv) > 2 else None\n\n\nclass ModelOwner:\n\n LEARNING_RATE = 0.1\n ITERATIONS = 60000 // 30\n\n def __init__(self, player_name):\n self.player_name = player_name\n\n with tf.device(tfe.get_config().get_player(player_name).device_name):\n self._initialize_weights()\n\n def _initialize_weights(self):\n with tf.name_scope('parameters'):\n self.w0 = tf.Variable(tf.random_normal([28 * 28, 512]))\n self.b0 = tf.Variable(tf.zeros([512]))\n self.w1 = tf.Variable(tf.random_normal([512, 10]))\n self.b1 = tf.Variable(tf.zeros([10]))\n\n def _build_model(self, x, y):\n w0 = self.w0.read_value()\n b0 = self.b0.read_value()\n w1 = self.w1.read_value()\n b1 = self.b1.read_value()\n params = (w0, b0, w1, b1)\n\n layer0 = tf.matmul(x, w0) + b0\n layer1 = tf.nn.sigmoid(layer0)\n layer2 = tf.matmul(layer1, w1) + b1\n predictions = layer2\n\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=predictions, labels=y))\n grads = tf.gradients(ys=loss, xs=params)\n return predictions, loss, grads\n\n def build_training_model(self, x, y):\n \"\"\"\n This method will be called once by all data owners\n to create a local gradient computation on their machine.\n \"\"\"\n _, _, grads = self._build_model(x, y)\n return grads\n\n def _build_validation_model(self, x, y):\n predictions, loss, _ = self._build_model(x, y)\n most_likely = tf.argmax(predictions, axis=1)\n return most_likely, loss\n\n def _build_data_pipeline(self):\n\n def normalize(image, label):\n image = tf.cast(image, tf.float32) / 255.0\n return image, label\n\n dataset = tf.data.TFRecordDataset([\"./data/train.tfrecord\"])\n dataset = dataset.map(decode)\n dataset = dataset.map(normalize)\n dataset = dataset.batch(50)\n dataset = dataset.take(1) # keep validating on the same items\n dataset = dataset.repeat()\n\n iterator = dataset.make_one_shot_iterator()\n return iterator.get_next()\n\n def update_model(self, *grads):\n params = [self.w0, self.b0, self.w1, self.b1]\n grads = 
[tf.cast(grad, tf.float32) for grad in grads]\n with tf.name_scope('update'):\n update_op = tf.group(*[\n param.assign(param - grad * self.LEARNING_RATE)\n for param, grad in zip(params, grads)\n ])\n # return update_op\n\n with tf.name_scope('validate'):\n x, y = self._build_data_pipeline()\n y_hat, loss = self._build_validation_model(x, y)\n\n with tf.control_dependencies([update_op]):\n return tf.print('expect', loss, y, y_hat, summarize=50)\n\n\nclass DataOwner:\n\n BATCH_SIZE = 30\n\n def __init__(self, player_name, build_training_model):\n self.player_name = player_name\n self._build_training_model = build_training_model\n\n def _build_data_pipeline(self):\n\n def normalize(image, label):\n image = tf.cast(image, tf.float32) / 255.0\n return image, label\n\n dataset = tf.data.TFRecordDataset([\"./data/train.tfrecord\"])\n dataset = dataset.map(decode)\n dataset = dataset.map(normalize)\n dataset = dataset.repeat()\n dataset = dataset.batch(self.BATCH_SIZE)\n\n iterator = dataset.make_one_shot_iterator()\n return iterator.get_next()\n\n def compute_gradient(self):\n\n with tf.name_scope('data_loading'):\n x, y = self._build_data_pipeline()\n\n with tf.name_scope('gradient_computation'):\n grads = self._build_training_model(x, y)\n\n return grads\n\n\nmodel_owner = ModelOwner('model-owner')\ndata_owners = [\n DataOwner('data-owner-0', model_owner.build_training_model),\n DataOwner('data-owner-1', model_owner.build_training_model),\n DataOwner('data-owner-2', model_owner.build_training_model),\n]\n\nmodel_grads = zip(*(\n tfe.define_private_input(data_owner.player_name, data_owner.compute_gradient)\n for data_owner in data_owners\n))\n\nwith tf.name_scope('secure_aggregation'):\n aggregated_model_grads = [\n tfe.add_n(grads) / len(grads)\n for grads in model_grads\n ]\n\niteration_op = tfe.define_output(model_owner.player_name, aggregated_model_grads, model_owner.update_model)\n\nwith tfe.Session(target=session_target) as sess:\n sess.run(tf.global_variables_initializer(), tag='init')\n\n for i in range(model_owner.ITERATIONS):\n if i % 100 == 0:\n print(\"Iteration {}\".format(i))\n sess.run(iteration_op, tag='iteration')\n else:\n sess.run(iteration_op)\n" ]
[ [ "tensorflow.print", "tensorflow.constant", "numpy.array", "tensorflow.global_variables_initializer" ], [ "tensorflow.zeros", "tensorflow.data.TFRecordDataset", "tensorflow.argmax", "tensorflow.matmul", "tensorflow.gradients", "tensorflow.print", "tensorflow.name_scope", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.control_dependencies", "tensorflow.nn.sigmoid", "tensorflow.global_variables_initializer", "tensorflow.random_normal", "tensorflow.cast" ] ]
suvarnak/datasets
[ "682b5adee6c36e9867f397076080ec23d9616dcc", "682b5adee6c36e9867f397076080ec23d9616dcc" ]
[ "tensorflow_datasets/core/download/download_manager.py", "tensorflow_datasets/image/celebahq.py" ]
[ "# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Download manager interface.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport uuid\n\nfrom absl import logging\nimport promise\nimport six\nimport tensorflow as tf\n\nfrom tensorflow_datasets.core import api_utils\nfrom tensorflow_datasets.core import utils\nfrom tensorflow_datasets.core.download import downloader\nfrom tensorflow_datasets.core.download import extractor\nfrom tensorflow_datasets.core.download import resource as resource_lib\nfrom tensorflow_datasets.core.download import util\n\n\nclass NonMatchingChecksumError(Exception):\n \"\"\"The downloaded file doesn't have expected checksum.\"\"\"\n\n def __init__(self, url, tmp_path):\n msg = 'Artifact %s, downloaded to %s, has wrong checksum.' % (url, tmp_path)\n Exception.__init__(self, msg)\n\n\nclass DownloadConfig(object):\n \"\"\"Configuration for `tfds.core.DatasetBuilder.download_and_prepare`.\"\"\"\n\n def __init__(self,\n extract_dir=None,\n manual_dir=None,\n download_mode=None,\n compute_stats=None,\n max_examples_per_split=None):\n \"\"\"Constructs a `DownloadConfig`.\n\n Args:\n extract_dir: `str`, directory where extracted files are stored.\n Defaults to \"<download_dir>/extracted\".\n manual_dir: `str`, read-only directory where manually downloaded/extracted\n data is stored. Defaults to\n \"<download_dir>/manual\".\n download_mode: `tfds.GenerateMode`, how to deal with downloads or data\n that already exists. Defaults to `REUSE_DATASET_IF_EXISTS`, which will\n reuse both downloads and data if it already exists.\n compute_stats: `tfds.download.ComputeStats`, whether to compute\n statistics over the generated data. Defaults to `AUTO`.\n max_examples_per_split: `int`, optional max number of examples to write\n into each split.\n \"\"\"\n self.extract_dir = extract_dir\n self.manual_dir = manual_dir\n self.download_mode = util.GenerateMode(\n download_mode or util.GenerateMode.REUSE_DATASET_IF_EXISTS)\n self.compute_stats = util.ComputeStatsMode(\n compute_stats or util.ComputeStatsMode.AUTO)\n self.max_examples_per_split = max_examples_per_split\n\n\nclass DownloadManager(object):\n \"\"\"Manages the download and extraction of files, as well as caching.\n\n Downloaded files are cached under `download_dir`. The file name of downloaded\n files follows pattern \"${sanitized_url}${content_checksum}.${ext}\". 
Eg:\n 'cs.toronto.edu_kriz_cifar-100-pythonJDF[...]I.tar.gz'.\n\n While a file is being downloaded, it is placed into a directory following a\n similar but different pattern:\n \"%{sanitized_url}${url_checksum}.tmp.${uuid}\".\n\n When a file is downloaded, a \"%{fname}s.INFO.json\" file is created next to it.\n This INFO file contains the following information:\n {\"dataset_names\": [\"name1\", \"name2\"],\n \"urls\": [\"http://url.of/downloaded_file\"]}\n\n Extracted files/dirs are stored under `extract_dir`. The file name or\n directory name is the same as the original name, prefixed with the extraction\n method. E.g.\n \"${extract_dir}/TAR_GZ.cs.toronto.edu_kriz_cifar-100-pythonJDF[...]I.tar.gz\".\n\n The function members accept either plain value, or values wrapped into list\n or dict. Giving a data structure will parallelize the downloads.\n\n Example of usage:\n\n ```\n # Sequential download: str -> str\n train_dir = dl_manager.download_and_extract('https://abc.org/train.tar.gz')\n test_dir = dl_manager.download_and_extract('https://abc.org/test.tar.gz')\n\n # Parallel download: list -> list\n image_files = dl_manager.download(\n ['https://a.org/1.jpg', 'https://a.org/2.jpg', ...])\n\n # Parallel download: dict -> dict\n data_dirs = dl_manager.download_and_extract({\n 'train': 'https://abc.org/train.zip',\n 'test': 'https://abc.org/test.zip',\n })\n data_dirs['train']\n data_dirs['test']\n ```\n\n For more customization on the download/extraction (ex: passwords, output_name,\n ...), you can pass a `tfds.download.Resource` as argument.\n \"\"\"\n\n @api_utils.disallow_positional_args\n def __init__(self,\n download_dir,\n extract_dir=None,\n manual_dir=None,\n dataset_name=None,\n checksums=None,\n force_download=False,\n force_extraction=False):\n \"\"\"Download manager constructor.\n\n Args:\n download_dir: `str`, path to directory where downloads are stored.\n extract_dir: `str`, path to directory where artifacts are extracted.\n manual_dir: `str`, path to manually downloaded/extracted data directory.\n dataset_name: `str`, name of dataset this instance will be used for. If\n provided, downloads will contain which datasets they were used for.\n checksums: `dict<str url, str sha256>`, url to sha256 of resource.\n Only URLs present are checked.\n If empty, checksum of (already) downloaded files is computed and can\n then be retrieved using `recorded_download_checksums` property.\n force_download: `bool`, default to False. If True, always [re]download.\n force_extraction: `bool`, default to False. 
If True, always [re]extract.\n \"\"\"\n self._dataset_name = dataset_name\n self._checksums = checksums or {}\n self._record_checksum_size = not checksums\n self._recorded_download_checksums = {}\n self._download_sizes = {}\n self._download_dir = os.path.expanduser(download_dir)\n self._extract_dir = os.path.expanduser(\n extract_dir or os.path.join(download_dir, 'extracted'))\n self._manual_dir = manual_dir and os.path.expanduser(manual_dir)\n tf.io.gfile.makedirs(self._download_dir)\n tf.io.gfile.makedirs(self._extract_dir)\n self._force_download = force_download\n self._force_extraction = force_extraction\n self._extractor = extractor.get_extractor()\n self._downloader = downloader.get_downloader()\n\n @property\n def recorded_download_checksums(self):\n \"\"\"Returns checksums for downloaded urls.\"\"\"\n return dict(self._recorded_download_checksums)\n\n @property\n def download_sizes(self):\n \"\"\"Returns sizes (in bytes) for downloaded urls.\"\"\"\n return dict(self._download_sizes)\n\n def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):\n \"\"\"Store dled file to definitive place, write INFO file, return path.\"\"\"\n fnames = tf.io.gfile.listdir(tmp_dir_path)\n if len(fnames) > 1:\n raise AssertionError('More than one file in %s.' % tmp_dir_path)\n original_fname = fnames[0]\n tmp_path = os.path.join(tmp_dir_path, original_fname)\n if self._record_checksum_size:\n resource.sha256 = sha256\n self._download_sizes[resource.url] = dl_size\n self._recorded_download_checksums[resource.url] = sha256\n elif self._checksums[resource.url] != sha256:\n raise NonMatchingChecksumError(resource.url, tmp_path)\n resource.write_info_file(self._dataset_name, original_fname)\n # Unconditionally overwrite because either file doesn't exist or\n # FORCE_DOWNLOAD=true\n tf.io.gfile.rename(tmp_path, resource.path, overwrite=True)\n tf.io.gfile.rmtree(tmp_dir_path)\n return resource.path\n\n # synchronize and memoize decorators ensure same resource will only be\n # processed once, even if passed twice to download_manager.\n @util.build_synchronize_decorator()\n @utils.memoize()\n def _download(self, resource):\n \"\"\"Download resource, returns Promise->path to downloaded file.\"\"\"\n if isinstance(resource, six.string_types):\n resource = resource_lib.Resource(url=resource)\n resource.sha256 = self._checksums.get(resource.url, None)\n if not resource.path:\n resource.path = os.path.join(self._download_dir, resource.fname)\n if (not self._force_download and resource.sha256 and\n resource.exists_locally()):\n logging.info('URL %s already downloaded: reusing %s.', resource.url,\n resource.path)\n self._recorded_download_checksums[resource.url] = resource.sha256\n self._download_sizes[resource.url] = (\n tf.io.gfile.stat(resource.path).length)\n return promise.Promise.resolve(resource.path)\n # There is a slight difference between downloader and extractor here:\n # the extractor manages its own temp directory, while the DownloadManager\n # manages the temp directory of downloader.\n tmp_dir_path = '%s.tmp.%s' % (resource.path, uuid.uuid4().hex)\n tf.io.gfile.makedirs(tmp_dir_path)\n logging.info('Downloading %s into %s...', resource.url, tmp_dir_path)\n\n def callback(val):\n checksum, dl_size = val\n return self._handle_download_result(resource, tmp_dir_path, checksum,\n dl_size)\n return self._downloader.download(resource, tmp_dir_path).then(callback)\n\n @util.build_synchronize_decorator()\n @utils.memoize()\n def _extract(self, resource):\n \"\"\"Extract a single archive, 
returns Promise->path to extraction result.\"\"\"\n if isinstance(resource, six.string_types):\n resource = resource_lib.Resource(path=resource)\n if resource.extract_method == resource_lib.ExtractMethod.NO_EXTRACT:\n logging.info(\n 'Skipping extraction for %s (method=NO_EXTRACT).', resource.path)\n return promise.Promise.resolve(resource.path)\n extract_path = os.path.join(self._extract_dir, resource.extract_fname)\n if not self._force_extraction and tf.io.gfile.exists(extract_path):\n logging.info('Reusing extraction of %s at %s.', resource.path,\n extract_path)\n return promise.Promise.resolve(extract_path)\n return self._extractor.extract(resource, extract_path)\n\n @util.build_synchronize_decorator()\n @utils.memoize()\n def _download_extract(self, resource):\n \"\"\"Download-extract `Resource` or url, returns Promise->path.\"\"\"\n if isinstance(resource, six.string_types):\n resource = resource_lib.Resource(url=resource)\n def callback(path):\n resource.path = path\n return self._extract(resource)\n return self._download(resource).then(callback)\n\n def download_kaggle_data(self, competition_name):\n \"\"\"Download data for a given Kaggle competition.\"\"\"\n with self._downloader.tqdm():\n kaggle_downloader = self._downloader.kaggle_downloader(competition_name)\n urls = kaggle_downloader.competition_urls\n files = kaggle_downloader.competition_files\n return _map_promise(self._download,\n dict((f, u) for (f, u) in zip(files, urls)))\n\n def download(self, url_or_urls):\n \"\"\"Download given url(s).\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. Each\n url can be a `str` or `tfds.download.Resource`.\n\n Returns:\n downloaded_path(s): `str`, The downloaded paths matching the given input\n url_or_urls.\n \"\"\"\n # Add progress bar to follow the download state\n with self._downloader.tqdm():\n return _map_promise(self._download, url_or_urls)\n\n def iter_archive(self, resource):\n \"\"\"Returns iterator over files within archive.\n\n **Important Note**: caller should read files as they are yielded.\n Reading out of order is slow.\n\n Args:\n resource: path to archive or `tfds.download.Resource`.\n\n Returns:\n Generator yielding tuple (path_within_archive, file_obj).\n \"\"\"\n if isinstance(resource, six.string_types):\n resource = resource_lib.Resource(path=resource)\n return extractor.iter_archive(resource.path, resource.extract_method)\n\n def extract(self, path_or_paths):\n \"\"\"Extract given path(s).\n\n Args:\n path_or_paths: path or `list`/`dict` of path of file to extract. Each\n path can be a `str` or `tfds.download.Resource`.\n\n If not explicitly specified in `Resource`, the extraction method is deduced\n from downloaded file name.\n\n Returns:\n extracted_path(s): `str`, The extracted paths matching the given input\n path_or_paths.\n \"\"\"\n # Add progress bar to follow the download state\n with self._extractor.tqdm():\n return _map_promise(self._extract, path_or_paths)\n\n def download_and_extract(self, url_or_urls):\n \"\"\"Download and extract given url_or_urls.\n\n Is roughly equivalent to:\n\n ```\n extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))\n ```\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. 
Each\n url can be a `str` or `tfds.download.Resource`.\n\n If not explicitly specified in `Resource`, the extraction method will\n automatically be deduced from downloaded file name.\n\n Returns:\n extracted_path(s): `str`, extracted paths of given URL(s).\n \"\"\"\n # Add progress bar to follow the download state\n with self._downloader.tqdm():\n with self._extractor.tqdm():\n return _map_promise(self._download_extract, url_or_urls)\n\n @property\n def manual_dir(self):\n \"\"\"Returns the directory containing the manually extracted data.\"\"\"\n if not tf.io.gfile.exists(self._manual_dir):\n raise AssertionError(\n 'Manual directory {} does not exist. Create it and download/extract '\n 'dataset artifacts in there.'.format(self._manual_dir))\n return self._manual_dir\n\n\n# ============================================================================\n# In Python 2.X, threading.Condition.wait() cannot be interrupted by SIGINT,\n# unless it's given a timeout. Here we artificially give a long timeout to\n# allow ctrl+C.\n# This code should be deleted once python2 is no longer supported.\nif sys.version_info[0] > 2:\n\n def _wait_on_promise(p):\n return p.get()\n\nelse:\n\n def _wait_on_promise(p):\n while True:\n result = p.get(sys.maxint) # pylint: disable=g-deprecated-member-used\n if p.is_fulfilled:\n return result\n\n# ============================================================================\n\n\ndef _map_promise(map_fn, all_inputs):\n \"\"\"Map the function into each element and resolve the promise.\"\"\"\n all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function\n res = utils.map_nested(_wait_on_promise, all_promises)\n return res\n", "# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Celeba-HQ dataset.\"\"\"\nimport os\n\nimport tensorflow as tf\nfrom tensorflow_datasets.core import api_utils\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\\\n@article{DBLP:journals/corr/abs-1710-10196,\n author = {Tero Karras and\n Timo Aila and\n Samuli Laine and\n Jaakko Lehtinen},\n title = {Progressive Growing of GANs for Improved Quality, Stability, and Variation},\n journal = {CoRR},\n volume = {abs/1710.10196},\n year = {2017},\n url = {http://arxiv.org/abs/1710.10196},\n archivePrefix = {arXiv},\n eprint = {1710.10196},\n timestamp = {Mon, 13 Aug 2018 16:46:42 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/abs-1710-10196},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nHigh-quality version of the CELEBA\ndataset, consisting of 30000 images in 1024 x 1024 resolution.\n\nWARNING: This dataset currently requires you to prepare images on your own.\n\"\"\"\n\n\nclass CelebaHQConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for CelebaHQ.\"\"\"\n\n @api_utils.disallow_positional_args\n def __init__(self, resolution, **kwargs):\n \"\"\"BuilderConfig for SQUAD.\n\n Args:\n resolution: Resolution of the image. 
Values supported: powers of 2 up to\n 1024.\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n super(CelebaHQConfig, self).__init__(\n name=\"%d\" % resolution,\n description=(\"CelebaHQ images in %d x %d resolution\" %\n (resolution, resolution)),\n **kwargs)\n self.resolution = resolution\n self.file_name = \"data%dx%d.tar\" % (resolution, resolution)\n\n\nclass CelebAHq(tfds.core.GeneratorBasedBuilder):\n \"\"\"Celeba_HQ Dataset.\"\"\"\n\n VERSION = tfds.core.Version(\"0.1.0\")\n\n BUILDER_CONFIGS = [\n CelebaHQConfig(resolution=1024, version=\"0.1.0\"),\n CelebaHQConfig(resolution=512, version=\"0.1.0\"),\n CelebaHQConfig(resolution=256, version=\"0.1.0\"),\n CelebaHQConfig(resolution=128, version=\"0.1.0\"),\n CelebaHQConfig(resolution=64, version=\"0.1.0\"),\n CelebaHQConfig(resolution=32, version=\"0.1.0\"),\n CelebaHQConfig(resolution=16, version=\"0.1.0\"),\n CelebaHQConfig(resolution=8, version=\"0.1.0\"),\n CelebaHQConfig(resolution=4, version=\"0.1.0\"),\n CelebaHQConfig(resolution=2, version=\"0.1.0\"),\n CelebaHQConfig(resolution=1, version=\"0.1.0\"),\n ]\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"image\":\n tfds.features.Image(\n shape=(self.builder_config.resolution,\n self.builder_config.resolution, 3),\n encoding_format=\"png\"),\n \"image/filename\":\n tfds.features.Text(),\n },),\n urls=[\"https://github.com/tkarras/progressive_growing_of_gans\"],\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n image_tar_file = os.path.join(dl_manager.manual_dir,\n self.builder_config.file_name)\n if not tf.io.gfile.exists(image_tar_file):\n # The current celebahq generation code depends on a concrete version of\n # pillow library and cannot be easily ported into tfds.\n msg = \"You must download the dataset files manually and place them in: \"\n msg += dl_manager.manual_dir\n msg += \" as .tar files. See testing/test_data/fake_examples/celeb_a_hq \"\n raise AssertionError(msg)\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n num_shards=50,\n gen_kwargs={\"archive\": dl_manager.iter_archive(image_tar_file)},\n )\n ]\n\n def _generate_examples(self, archive):\n for fname, fobj in archive:\n yield {\"image\": fobj, \"image/filename\": fname}\n" ]
[ [ "tensorflow.io.gfile.rename", "tensorflow.io.gfile.rmtree", "tensorflow.io.gfile.stat", "tensorflow.io.gfile.makedirs", "tensorflow.io.gfile.exists", "tensorflow.io.gfile.listdir" ], [ "tensorflow.io.gfile.exists" ] ]
alisiahkoohi/devito
[ "f535a44dff12de2837eb6e3217a65ffb2d371cb8" ]
[ "tests/test_derivatives.py" ]
[ "import numpy as np\nimport pytest\nfrom sympy import simplify, diff, cos, sin, Float\n\nfrom devito import (Grid, Function, TimeFunction, Eq, Operator, NODE,\n ConditionalDimension, left, right, centered, div, grad)\nfrom devito.finite_differences import Derivative, Differentiable\nfrom devito.finite_differences.differentiable import EvalDiffDerivative\nfrom devito.symbolics import indexify, retrieve_indexed\n\n_PRECISION = 9\n\n\ndef x(grid):\n return grid.dimensions[0]\n\n\ndef y(grid):\n return grid.dimensions[1]\n\n\ndef z(grid):\n return grid.dimensions[2]\n\n\ndef t(grid):\n return grid.stepping_dim\n\n\nclass TestFD(object):\n \"\"\"\n Class for finite difference testing.\n Tests the accuracy w.r.t polynomials.\n Test that the shortcut produce the same answer as the FD functions.\n \"\"\"\n\n def setup_method(self):\n self.shape = (20, 20, 20)\n self.grid = Grid(self.shape)\n\n def test_diff(self):\n \"\"\"Test that expr.diff returns an object of type devito.Derivative.\"\"\"\n u = Function(name='u', grid=self.grid)\n du = u.diff(x(self.grid))\n assert isinstance(du, Derivative)\n\n @pytest.mark.parametrize('so', [2, 3, 4, 5])\n def test_fd_indices(self, so):\n \"\"\"\n Test that shifted derivative have Integer offset after indexification.\n \"\"\"\n grid = Grid((10,))\n x = grid.dimensions[0]\n x0 = x + .5 * x.spacing\n u = Function(name=\"u\", grid=grid, space_order=so)\n dx = indexify(u.dx(x0=x0).evaluate)\n for f in retrieve_indexed(dx):\n assert len(f.indices[0].atoms(Float)) == 0\n\n @pytest.mark.parametrize('SymbolType, dim', [\n (Function, x), (Function, y),\n (TimeFunction, x), (TimeFunction, y), (TimeFunction, t),\n ])\n def test_stencil_derivative(self, SymbolType, dim):\n \"\"\"Test symbolic behaviour when expanding stencil derivatives\"\"\"\n i = dim(self.grid)\n u = SymbolType(name='u', grid=self.grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == self.shape and u_dii.grid.shape == self.shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))\n\n @pytest.mark.parametrize('SymbolType, derivative, dim, expected', [\n (Function, ['dx2'], 3, 'Derivative(u(x, y, z), (x, 2))'),\n (Function, ['dx2dy'], 3, 'Derivative(u(x, y, z), (x, 2), y)'),\n (Function, ['dx2dydz'], 3, 'Derivative(u(x, y, z), (x, 2), y, z)'),\n (Function, ['dx2', 'dy'], 3, 'Derivative(Derivative(u(x, y, z), (x, 2)), y)'),\n (Function, ['dx2dy', 'dz2'], 3,\n 'Derivative(Derivative(u(x, y, z), (x, 2), y), (z, 2))'),\n (TimeFunction, ['dx2'], 3, 'Derivative(u(t, x, y, z), (x, 2))'),\n (TimeFunction, ['dx2dy'], 3, 'Derivative(u(t, x, y, z), (x, 2), y)'),\n (TimeFunction, ['dx2', 'dy'], 3,\n 'Derivative(Derivative(u(t, x, y, z), (x, 2)), y)'),\n (TimeFunction, ['dx', 'dy', 'dx2', 'dz', 'dydz'], 3,\n 'Derivative(Derivative(Derivative(Derivative(Derivative(u(t, x, y, z), x), y),' +\n ' (x, 2)), z), y, z)')\n ])\n def test_unevaluation(self, SymbolType, derivative, dim, expected):\n u = SymbolType(name='u', grid=self.grid, 
time_order=2, space_order=2)\n expr = getattr(u, derivative[0])\n for d in derivative[1:]:\n expr = getattr(expr, d)\n assert(expr.__str__() == expected)\n # Make sure the FD evaluation executes\n expr.evaluate\n\n @pytest.mark.parametrize('expr,expected', [\n ('u.dx + u.dy', 'Derivative(u, x) + Derivative(u, y)'),\n ('u.dxdy', 'Derivative(u, x, y)'),\n ('u.laplace',\n 'Derivative(u, (x, 2)) + Derivative(u, (y, 2)) + Derivative(u, (z, 2))'),\n ('(u.dx + u.dy).dx', 'Derivative(Derivative(u, x) + Derivative(u, y), x)'),\n ('((u.dx + u.dy).dx + u.dxdy).dx',\n 'Derivative(Derivative(Derivative(u, x) + Derivative(u, y), x) +' +\n ' Derivative(u, x, y), x)'),\n ('(u**4).dx', 'Derivative(u**4, x)'),\n ('(u/4).dx', 'Derivative(u/4, x)'),\n ('((u.dx + v.dy).dx * v.dx).dy.dz',\n 'Derivative(Derivative(Derivative(Derivative(u, x) + Derivative(v, y), x) *' +\n ' Derivative(v, x), y), z)')\n ])\n def test_arithmetic(self, expr, expected):\n x, y, z = self.grid.dimensions\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n v = Function(name='v', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n expected = eval(expected)\n assert expr == expected\n\n @pytest.mark.parametrize('expr, rules', [\n ('u.dx + u.dy', '{u.indices[0]: 1, u.indices[1]: 0}'),\n ('u.dxdy - u.dxdz', '{u.indices[0]: u.indices[0] + u.indices[0].spacing,' +\n 'u.indices[1]: 0, u.indices[2]: u.indices[1]}'),\n ('u.dx2dy + u.dz ', '{u.indices[0]: u.indices[0] + u.indices[0].spacing,' +\n 'u.indices[2]: u.indices[2] - 10}'),\n ])\n def test_derivative_eval_at(self, expr, rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n rules = eval(rules)\n assert expr.evaluate.xreplace(rules) == expr.xreplace(rules).evaluate\n\n @pytest.mark.parametrize('expr, rules', [\n ('u.dx', '{u.indices[0]: 1}'),\n ('u.dy', '{u.indices[1]: u.indices[2] - 7}'),\n ('u.dz', '{u.indices[2]: u.indices[0] + u.indices[1].spacing}'),\n ])\n def test_derivative_eval_at_expr(self, expr, rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n rules = eval(rules)\n assert expr.evaluate.xreplace(rules) == expr.xreplace(rules).evaluate\n assert expr.expr == expr.xreplace(rules).expr\n\n @pytest.mark.parametrize('expr, composite_rules', [\n ('u.dx', '[{u.indices[0]: 1}, {1: 4}]'),\n ])\n def test_derivative_eval_at_composite(self, expr, composite_rules):\n u = Function(name='u', grid=self.grid, time_order=2, space_order=2) # noqa\n expr = eval(expr)\n evaluated_expr = expr.evaluate\n composite_rules = eval(composite_rules)\n for mapper in composite_rules:\n evaluated_expr = evaluated_expr.xreplace(mapper)\n expr = expr.xreplace(mapper)\n assert evaluated_expr == expr.evaluate\n\n @pytest.mark.parametrize('SymbolType, derivative, dim', [\n (Function, 'dx2', 3), (Function, 'dy2', 3),\n (TimeFunction, 'dx2', 3), (TimeFunction, 'dy2', 3), (TimeFunction, 'dt', 2)\n ])\n def test_preformed_derivatives(self, SymbolType, derivative, dim):\n \"\"\"Test the stencil expressions provided by devito objects\"\"\"\n u = SymbolType(name='u', grid=self.grid, time_order=2, space_order=2)\n expr = getattr(u, derivative)\n assert(len(expr.evaluate.args) == dim)\n\n @pytest.mark.parametrize('derivative, dim', [\n ('dx', x), ('dy', y), ('dz', z)\n ])\n @pytest.mark.parametrize('order', [1, 2, 4, 6, 8, 10, 12, 14, 16])\n def test_derivatives_space(self, derivative, dim, order):\n \"\"\"Test first derivative expressions against native sympy\"\"\"\n 
dim = dim(self.grid)\n u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)\n expr = getattr(u, derivative).evaluate\n # Establish native sympy derivative expression\n width = int(order / 2)\n if order <= 2:\n indices = [dim, dim + dim.spacing]\n else:\n indices = [(dim + i * dim.spacing) for i in range(-width, width + 1)]\n\n s_expr = u.diff(dim).as_finite_difference(indices).evalf(_PRECISION)\n assert(simplify(expr - s_expr) == 0) # Symbolic equality\n assert type(expr) == EvalDiffDerivative\n expr1 = s_expr.func(*expr.args)\n assert(expr1 == s_expr) # Exact equality\n\n @pytest.mark.parametrize('derivative, dim', [\n ('dx2', x), ('dy2', y), ('dz2', z)\n ])\n @pytest.mark.parametrize('order', [2, 4, 6, 8, 10, 12, 14, 16])\n def test_second_derivatives_space(self, derivative, dim, order):\n \"\"\"\n Test second derivative expressions against native sympy.\n \"\"\"\n dim = dim(self.grid)\n u = TimeFunction(name='u', grid=self.grid, time_order=2, space_order=order)\n expr = getattr(u, derivative).evaluate\n # Establish native sympy derivative expression\n width = int(order / 2)\n indices = [(dim + i * dim.spacing) for i in range(-width, width + 1)]\n s_expr = u.diff(dim, dim).as_finite_difference(indices).evalf(_PRECISION)\n assert(simplify(expr - s_expr) == 0) # Symbolic equality\n assert type(expr) == EvalDiffDerivative\n expr1 = s_expr.func(*expr.args)\n assert(expr1 == s_expr) # Exact equality\n\n @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])\n # Only test x and t as y and z are the same as x\n @pytest.mark.parametrize('derivative', ['dx', 'dxl', 'dxr', 'dx2'])\n def test_fd_space(self, derivative, space_order):\n \"\"\"\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p.\n \"\"\"\n # dummy axis dimension\n nx = 100\n xx = np.linspace(-1, 1, nx)\n dx = xx[1] - xx[0]\n # Symbolic data\n grid = Grid(shape=(nx,), dtype=np.float32)\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=space_order)\n du = Function(name=\"du\", grid=grid, space_order=space_order)\n # Define polynomial with exact fd\n coeffs = np.ones((space_order,), dtype=np.float32)\n polynome = sum([coeffs[i]*x**i for i in range(0, space_order)])\n polyvalues = np.array([polynome.subs(x, xi) for xi in xx], np.float32)\n # Fill original data with the polynomial values\n u.data[:] = polyvalues\n # True derivative of the polynome\n Dpolynome = diff(diff(polynome)) if derivative == 'dx2' else diff(polynome)\n Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx], np.float32)\n # FD derivative, symbolic\n u_deriv = getattr(u, derivative)\n # Compute numerical FD\n stencil = Eq(du, u_deriv)\n op = Operator(stencil, subs={x.spacing: dx})\n op.apply()\n\n # Check exactness of the numerical derivative except inside space_brd\n space_border = space_order\n error = abs(du.data[space_border:-space_border] -\n Dpolyvalues[space_border:-space_border])\n assert np.isclose(np.mean(error), 0., atol=1e-3)\n\n @pytest.mark.parametrize('space_order', [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])\n @pytest.mark.parametrize('stagger', [centered, left, right])\n # Only test x and t as y and z are the same as x\n def test_fd_space_staggered(self, space_order, stagger):\n \"\"\"\n This test compares the discrete finite-difference scheme against polynomials\n For a given order p, the finite difference scheme should\n be exact for polynomials of order p\n 
\"\"\"\n # dummy axis dimension\n nx = 101\n xx = np.linspace(-1, 1, nx)\n dx = xx[1] - xx[0]\n # Symbolic data\n grid = Grid(shape=(nx,), dtype=np.float32)\n x = grid.dimensions[0]\n\n # Location of the staggered function\n if stagger == left:\n off = -.5\n side = -x\n xx2 = xx + off * dx\n elif stagger == right:\n off = .5\n side = x\n xx2 = xx + off * dx\n else:\n side = NODE\n xx2 = xx\n\n u = Function(name=\"u\", grid=grid, space_order=space_order, staggered=side)\n du = Function(name=\"du\", grid=grid, space_order=space_order, staggered=side)\n # Define polynomial with exact fd\n coeffs = np.ones((space_order-1,), dtype=np.float32)\n polynome = sum([coeffs[i]*x**i for i in range(0, space_order-1)])\n polyvalues = np.array([polynome.subs(x, xi) for xi in xx2], np.float32)\n # Fill original data with the polynomial values\n u.data[:] = polyvalues\n # True derivative of the polynome\n Dpolynome = diff(polynome)\n Dpolyvalues = np.array([Dpolynome.subs(x, xi) for xi in xx2], np.float32)\n # Compute numerical FD\n stencil = Eq(du, u.dx)\n op = Operator(stencil, subs={x.spacing: dx})\n op.apply()\n\n # Check exactness of the numerical derivative except inside space_brd\n space_border = space_order\n error = abs(du.data[space_border:-space_border] -\n Dpolyvalues[space_border:-space_border])\n\n assert np.isclose(np.mean(error), 0., atol=1e-3)\n\n @pytest.mark.parametrize('so', [2, 4, 6, 8])\n def test_fd_new_order(self, so):\n grid = Grid((10,))\n u = Function(name=\"u\", grid=grid, space_order=so)\n u1 = Function(name=\"u\", grid=grid, space_order=so//2)\n u2 = Function(name=\"u\", grid=grid, space_order=2*so)\n assert str(u.dx(fd_order=so//2).evaluate) == str(u1.dx.evaluate)\n assert str(u.dx(fd_order=2*so).evaluate) == str(u2.dx.evaluate)\n\n def test_fd_new_side(self):\n grid = Grid((10,))\n u = Function(name=\"u\", grid=grid, space_order=4)\n assert u.dx(side=left).evaluate == u.dxl.evaluate\n assert u.dx(side=right).evaluate == u.dxr.evaluate\n assert u.dxl(side=centered).evaluate == u.dx.evaluate\n\n @pytest.mark.parametrize('so, expected', [\n (2, '1.0*u(x)/h_x - 1.0*u(x - 1.0*h_x)/h_x'),\n (4, '1.125*u(x)/h_x + 0.0416666667*u(x - 2.0*h_x)/h_x - '\n '1.125*u(x - 1.0*h_x)/h_x - 0.0416666667*u(x + 1.0*h_x)/h_x'),\n (6, '1.171875*u(x)/h_x - 0.0046875*u(x - 3.0*h_x)/h_x + '\n '0.0651041667*u(x - 2.0*h_x)/h_x - 1.171875*u(x - 1.0*h_x)/h_x - '\n '0.0651041667*u(x + 1.0*h_x)/h_x + 0.0046875*u(x + 2.0*h_x)/h_x'),\n (8, '1.19628906*u(x)/h_x + 0.000697544643*u(x - 4.0*h_x)/h_x - '\n '0.0095703125*u(x - 3.0*h_x)/h_x + 0.0797526042*u(x - 2.0*h_x)/h_x - '\n '1.19628906*u(x - 1.0*h_x)/h_x - 0.0797526042*u(x + 1.0*h_x)/h_x + '\n '0.0095703125*u(x + 2.0*h_x)/h_x - 0.000697544643*u(x + 3.0*h_x)/h_x')])\n def test_fd_new_x0(self, so, expected):\n grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=so)\n assert u.dx(x0=x + x.spacing).evaluate == u.dx.evaluate.subs({x: x + x.spacing})\n assert u.dx(x0=x - x.spacing).evaluate == u.dx.evaluate.subs({x: x - x.spacing})\n # half shifted compare to explicit coeffs (Forneberg)\n assert str(u.dx(x0=x - .5 * x.spacing).evaluate) == expected\n\n def test_new_x0_eval_at(self):\n \"\"\"\n Make sure that explicitly set x0 does not get overwritten by eval_at.\n \"\"\"\n grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=2)\n v = Function(name=\"v\", grid=grid, space_order=2)\n assert u.dx(x0=x - x.spacing/2)._eval_at(v).x0 == {x: x - x.spacing/2}\n\n def test_fd_new_lo(self):\n 
grid = Grid((10,))\n x = grid.dimensions[0]\n u = Function(name=\"u\", grid=grid, space_order=2)\n\n dplus = \"-1.0*u(x)/h_x + 1.0*u(x + 1.0*h_x)/h_x\"\n dminus = \"1.0*u(x)/h_x - 1.0*u(x - 1.0*h_x)/h_x\"\n assert str(u.dx(x0=x + .5 * x.spacing).evaluate) == dplus\n assert str(u.dx(x0=x - .5 * x.spacing).evaluate) == dminus\n assert str(u.dx(x0=x + .5 * x.spacing, fd_order=1).evaluate) == dplus\n assert str(u.dx(x0=x - .5 * x.spacing, fd_order=1).evaluate) == dminus\n\n def test_subsampled_fd(self):\n \"\"\"\n Test that the symbolic interface is working for space subsampled\n functions.\n \"\"\"\n nt = 19\n grid = Grid(shape=(12, 12), extent=(11, 11))\n\n u = TimeFunction(name='u', grid=grid, save=nt, space_order=2)\n assert(grid.time_dim in u.indices)\n\n # Creates subsampled spatial dimensions and according grid\n dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)\n for d in u.grid.dimensions])\n grid2 = Grid((6, 6), dimensions=dims)\n u2 = TimeFunction(name='u2', grid=grid2, save=nt, space_order=1)\n for i in range(nt):\n for j in range(u2.data_with_halo.shape[2]):\n u2.data_with_halo[i, :, j] = np.arange(u2.data_with_halo.shape[2])\n\n eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2.dx)]\n op = Operator(eqns)\n op.apply(time_M=nt-2)\n # Verify that u2[1, x,y]= du2/dx[0, x, y]\n\n assert np.allclose(u.data[-1], nt-1)\n assert np.allclose(u2.data[1], 0.5)\n\n @pytest.mark.parametrize('expr,expected', [\n ('f.dx', '-f(x)/h_x + f(x + h_x)/h_x'),\n ('f.dx + g.dx', '-f(x)/h_x + f(x + h_x)/h_x - g(x)/h_x + g(x + h_x)/h_x'),\n ('-f', '-f(x)'),\n ('-(f + g)', '-f(x) - g(x)')\n ])\n def test_shortcuts(self, expr, expected):\n grid = Grid(shape=(10,))\n f = Function(name='f', grid=grid) # noqa\n g = Function(name='g', grid=grid) # noqa\n\n expr = eval(expr)\n\n assert isinstance(expr, Differentiable)\n assert expected == str(expr.evaluate)\n\n @pytest.mark.parametrize('so', [2, 5, 8])\n def test_all_shortcuts(self, so):\n \"\"\"\n Test that verify that all fd shortcuts are functional.\n \"\"\"\n grid = Grid(shape=(10, 10, 10))\n f = Function(name='f', grid=grid, space_order=so)\n g = TimeFunction(name='g', grid=grid, space_order=so)\n\n for fd in f._fd:\n assert getattr(f, fd)\n\n for fd in g._fd:\n assert getattr(g, fd)\n\n @pytest.mark.parametrize('so', [2, 4, 8, 12])\n @pytest.mark.parametrize('ndim', [1, 2])\n @pytest.mark.parametrize('derivative, adjoint_name', [\n ('dx', 'dx'),\n ('dx2', 'dx2'),\n ('dxl', 'dxr'),\n ('dxr', 'dxl')])\n def test_fd_adjoint(self, so, ndim, derivative, adjoint_name):\n grid = Grid(shape=tuple([51]*ndim), extent=tuple([25]*ndim))\n x = grid.dimensions[0]\n f = Function(name='f', grid=grid, space_order=so)\n f_deriv = Function(name='f_deriv', grid=grid, space_order=so)\n g = Function(name='g', grid=grid, space_order=so)\n g_deriv = Function(name='g_deriv', grid=grid, space_order=so)\n\n # Fill f and g with smooth cos/sin\n Operator([Eq(g, x*cos(2*np.pi*x/5)), Eq(f, sin(2*np.pi*x/8))]).apply()\n # Check symbolic expression are expected ones for the adjoint .T\n deriv = getattr(f, derivative)\n coeff = 1 if derivative == 'dx2' else -1\n expected = coeff * getattr(f, derivative).evaluate.subs({x.spacing: -x.spacing})\n assert simplify(deriv.T.evaluate) == simplify(expected)\n\n # Compute numerical derivatives and verify dot test\n # i.e <f.dx, g> = <f, g.dx.T>\n\n eq_f = Eq(f_deriv, deriv)\n eq_g = Eq(g_deriv, getattr(g, derivative).T)\n\n op = Operator([eq_f, eq_g])\n op()\n\n a = np.dot(f_deriv.data.reshape(-1), g.data.reshape(-1))\n b = 
np.dot(g_deriv.data.reshape(-1), f.data.reshape(-1))\n assert np.isclose(1 - a/b, 0, atol=1e-5)\n\n @pytest.mark.parametrize('shift', [None, .5, -.5])\n @pytest.mark.parametrize('ndim', [2, 3])\n def test_shifted_div(self, shift, ndim):\n grid = Grid(tuple([11]*ndim))\n f = Function(name=\"f\", grid=grid, space_order=4)\n df = div(f, shift=shift).evaluate\n ref = 0\n for d in grid.dimensions:\n x0 = None if shift is None else d + shift * d.spacing\n ref += getattr(f, 'd%s' % d.name)(x0=x0)\n assert df == ref.evaluate\n\n @pytest.mark.parametrize('shift', [None, .5, -.5])\n @pytest.mark.parametrize('ndim', [2, 3])\n def test_shifted_div_of_vectorfunction(self, shift, ndim):\n grid = Grid(tuple([11]*ndim))\n f = Function(name=\"f\", grid=grid, space_order=4)\n df = div(grad(f), shift=shift).evaluate\n ref = 0\n for i, d in enumerate(grid.dimensions):\n x0 = None if shift is None else d + shift * d.spacing\n ref += getattr(grad(f)[i], 'd%s' % d.name)(x0=x0)\n assert df == ref.evaluate\n\n @pytest.mark.parametrize('shift', [None, .5, -.5])\n @pytest.mark.parametrize('ndim', [2, 3])\n def test_shifted_grad(self, shift, ndim):\n grid = Grid(tuple([11]*ndim))\n f = Function(name=\"f\", grid=grid, space_order=4)\n g = grad(f, shift=shift).evaluate\n for d, gi in zip(grid.dimensions, g):\n x0 = None if shift is None else d + shift * d.spacing\n assert gi == getattr(f, 'd%s' % d.name)(x0=x0).evaluate\n" ]
[ [ "numpy.isclose", "numpy.ones", "numpy.mean", "numpy.allclose", "numpy.arange", "numpy.linspace" ] ]
Leedk3/pvcnn
[ "8e3bddbc0719bdc262c5d438273eb2a54e45d9d4" ]
[ "data/kitti/example.py" ]
[ "''' Prepare KITTI data for 3D object detection.\n\nAuthor: Charles R. Qi\nDate: September 2017\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport cv2\nfrom PIL import Image\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR, 'mayavi'))\nimport kitti_util as utils\nimport cPickle as pickle\nfrom kitti_object import *\nimport argparse\n\n\ndef in_hull(p, hull):\n from scipy.spatial import Delaunay\n if not isinstance(hull,Delaunay):\n hull = Delaunay(hull)\n return hull.find_simplex(p)>=0\n\ndef extract_pc_in_box3d(pc, box3d):\n ''' pc: (N,3), box3d: (8,3) '''\n box3d_roi_inds = in_hull(pc[:,0:3], box3d)\n return pc[box3d_roi_inds,:], box3d_roi_inds\n\ndef extract_pc_in_box2d(pc, box2d):\n ''' pc: (N,2), box2d: (xmin,ymin,xmax,ymax) '''\n box2d_corners = np.zeros((4,2))\n box2d_corners[0,:] = [box2d[0],box2d[1]] \n box2d_corners[1,:] = [box2d[2],box2d[1]] \n box2d_corners[2,:] = [box2d[2],box2d[3]] \n box2d_corners[3,:] = [box2d[0],box2d[3]] \n box2d_roi_inds = in_hull(pc[:,0:2], box2d_corners)\n return pc[box2d_roi_inds,:], box2d_roi_inds\n \ndef demo():\n import mayavi.mlab as mlab\n from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))\n data_idx = 0\n\n # Load data from dataset\n objects = dataset.get_label_objects(data_idx)\n objects[0].print_object()\n img = dataset.get_image(data_idx)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) \n img_height, img_width, img_channel = img.shape\n print(('Image shape: ', img.shape))\n pc_velo = dataset.get_lidar(data_idx)[:,0:3]\n calib = dataset.get_calibration(data_idx)\n\n ## Draw lidar in rect camera coord\n #print(' -------- LiDAR points in rect camera coordination --------')\n #pc_rect = calib.project_velo_to_rect(pc_velo)\n #fig = draw_lidar_simple(pc_rect)\n #raw_input()\n\n # Draw 2d and 3d boxes on image\n print(' -------- 2D/3D bounding boxes in images --------')\n show_image_with_boxes(img, objects, calib)\n raw_input()\n\n # Show all LiDAR points. 
Draw 3d box in LiDAR point cloud\n print(' -------- LiDAR points and 3D boxes in velodyne coordinate --------')\n #show_lidar_with_boxes(pc_velo, objects, calib)\n #raw_input()\n show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height)\n raw_input()\n\n # Visualize LiDAR points on images\n print(' -------- LiDAR points projected to image plane --------')\n show_lidar_on_image(pc_velo, img, calib, img_width, img_height) \n raw_input()\n \n # Show LiDAR points that are in the 3d box\n print(' -------- LiDAR points in a 3D bounding box --------')\n box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P) \n box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)\n box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)\n print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(box3droi_pc_velo, fig=fig)\n draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)\n mlab.show(1)\n raw_input()\n \n # UVDepth Image and its backprojection to point clouds\n print(' -------- LiDAR points in a frustum from a 2D box --------')\n imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(pc_velo,\n calib, 0, 0, img_width, img_height, True)\n imgfov_pts_2d = pts_2d[fov_inds,:]\n imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)\n\n cameraUVDepth = np.zeros_like(imgfov_pc_rect)\n cameraUVDepth[:,0:2] = imgfov_pts_2d\n cameraUVDepth[:,2] = imgfov_pc_rect[:,2]\n\n # Show that the points are exactly the same\n backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)\n print(imgfov_pc_velo[0:20])\n print(backprojected_pc_velo[0:20])\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(backprojected_pc_velo, fig=fig)\n raw_input()\n\n # Only display those points that fall into 2d box\n print(' -------- LiDAR points in a frustum from a 2D box --------')\n xmin,ymin,xmax,ymax = \\\n objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax\n boxfov_pc_velo = \\\n get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)\n print(('2d box FOV point num: ', boxfov_pc_velo.shape[0]))\n\n fig = mlab.figure(figure=None, bgcolor=(0,0,0),\n fgcolor=None, engine=None, size=(1000, 500))\n draw_lidar(boxfov_pc_velo, fig=fig)\n mlab.show(1)\n raw_input()\n\ndef random_shift_box2d(box2d, shift_ratio=0.1):\n ''' Randomly shift box center, randomly scale width and height \n '''\n r = shift_ratio\n xmin,ymin,xmax,ymax = box2d\n h = ymax-ymin\n w = xmax-xmin\n cx = (xmin+xmax)/2.0\n cy = (ymin+ymax)/2.0\n cx2 = cx + w*r*(np.random.random()*2-1)\n cy2 = cy + h*r*(np.random.random()*2-1)\n h2 = h*(1+np.random.random()*2*r-r) # 0.9 to 1.1\n w2 = w*(1+np.random.random()*2*r-r) # 0.9 to 1.1\n return np.array([cx2-w2/2.0, cy2-h2/2.0, cx2+w2/2.0, cy2+h2/2.0])\n \ndef extract_frustum_data(idx_filename, split, output_filename, viz=False,\n perturb_box2d=False, augmentX=1, type_whitelist=['Car']):\n ''' Extract point clouds and corresponding annotations in frustums\n defined generated from 2D bounding boxes\n Lidar points and 3d boxes are in *rect camera* coord system\n (as that in 3d box label files)\n \n Input:\n idx_filename: string, each line of the file is a sample ID\n split: string, either trianing or testing\n output_filename: string, the name for output .pickle file\n viz: bool, whether to visualize extracted data\n perturb_box2d: bool, whether to perturb the box2d\n (used for 
data augmentation in train set)\n augmentX: scalar, how many augmentations to have for each 2D box.\n type_whitelist: a list of strings, object types we are interested in.\n Output:\n None (will write a .pickle file to the disk)\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR,'dataset/KITTI/object'), split)\n data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]\n\n id_list = [] # int number\n box2d_list = [] # [xmin,ymin,xmax,ymax]\n box3d_list = [] # (8,3) array in rect camera coord\n input_list = [] # channel number = 4, xyz,intensity in rect camera coord\n label_list = [] # 1 for roi object, 0 for clutter\n type_list = [] # string e.g. Car\n heading_list = [] # ry (along y-axis in rect camera coord) radius of\n # (cont.) clockwise angle from positive x axis in velo coord.\n box3d_size_list = [] # array of l,w,h\n frustum_angle_list = [] # angle of 2d box center from pos x-axis\n\n pos_cnt = 0\n all_cnt = 0\n for data_idx in data_idx_list:\n print('------------- ', data_idx)\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n objects = dataset.get_label_objects(data_idx)\n pc_velo = dataset.get_lidar(data_idx)\n pc_rect = np.zeros_like(pc_velo)\n pc_rect[:,0:3] = calib.project_velo_to_rect(pc_velo[:,0:3])\n pc_rect[:,3] = pc_velo[:,3]\n img = dataset.get_image(data_idx)\n img_height, img_width, img_channel = img.shape\n _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(pc_velo[:,0:3],\n calib, 0, 0, img_width, img_height, True)\n\n for obj_idx in range(len(objects)):\n if objects[obj_idx].type not in type_whitelist :continue\n\n # 2D BOX: Get pts rect backprojected \n box2d = objects[obj_idx].box2d\n for _ in range(augmentX):\n # Augment data by box2d perturbation\n if perturb_box2d:\n xmin,ymin,xmax,ymax = random_shift_box2d(box2d)\n print(box2d)\n print(xmin,ymin,xmax,ymax)\n else:\n xmin,ymin,xmax,ymax = box2d\n box_fov_inds = (pc_image_coord[:,0]<xmax) & \\\n (pc_image_coord[:,0]>=xmin) & \\\n (pc_image_coord[:,1]<ymax) & \\\n (pc_image_coord[:,1]>=ymin)\n box_fov_inds = box_fov_inds & img_fov_inds\n pc_in_box_fov = pc_rect[box_fov_inds,:]\n # Get frustum angle (according to center pixel in 2D BOX)\n box2d_center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0])\n uvdepth = np.zeros((1,3))\n uvdepth[0,0:2] = box2d_center\n uvdepth[0,2] = 20 # some random depth\n box2d_center_rect = calib.project_image_to_rect(uvdepth)\n frustum_angle = -1 * np.arctan2(box2d_center_rect[0,2],\n box2d_center_rect[0,0])\n # 3D BOX: Get pts velo in 3d box\n obj = objects[obj_idx]\n box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P) \n _,inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)\n label = np.zeros((pc_in_box_fov.shape[0]))\n label[inds] = 1\n # Get 3D BOX heading\n heading_angle = obj.ry\n # Get 3D BOX size\n box3d_size = np.array([obj.l, obj.w, obj.h])\n\n # Reject too far away object or object without points\n if ymax-ymin<25 or np.sum(label)==0:\n continue\n\n id_list.append(data_idx)\n box2d_list.append(np.array([xmin,ymin,xmax,ymax]))\n box3d_list.append(box3d_pts_3d)\n input_list.append(pc_in_box_fov)\n label_list.append(label)\n type_list.append(objects[obj_idx].type)\n heading_list.append(heading_angle)\n box3d_size_list.append(box3d_size)\n frustum_angle_list.append(frustum_angle)\n \n # collect statistics\n pos_cnt += np.sum(label)\n all_cnt += pc_in_box_fov.shape[0]\n \n print('Average pos ratio: %f' % (pos_cnt/float(all_cnt)))\n print('Average npoints: %f' % (float(all_cnt)/len(id_list)))\n \n with open(output_filename,'wb') as fp:\n 
pickle.dump(id_list, fp)\n pickle.dump(box2d_list,fp)\n pickle.dump(box3d_list,fp)\n pickle.dump(input_list, fp)\n pickle.dump(label_list, fp)\n pickle.dump(type_list, fp)\n pickle.dump(heading_list, fp)\n pickle.dump(box3d_size_list, fp)\n pickle.dump(frustum_angle_list, fp)\n \n if viz:\n import mayavi.mlab as mlab\n for i in range(10):\n p1 = input_list[i]\n seg = label_list[i] \n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,0], p1[:,1], p1[:,2], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,2], -p1[:,0], -p1[:,1], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n raw_input()\n\ndef get_box3d_dim_statistics(idx_filename):\n ''' Collect and dump 3D bounding box statistics '''\n dataset = kitti_object(os.path.join(ROOT_DIR,'dataset/KITTI/object'))\n dimension_list = []\n type_list = []\n ry_list = []\n data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]\n for data_idx in data_idx_list:\n print('------------- ', data_idx)\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n objects = dataset.get_label_objects(data_idx)\n for obj_idx in range(len(objects)):\n obj = objects[obj_idx]\n if obj.type=='DontCare':continue\n dimension_list.append(np.array([obj.l,obj.w,obj.h])) \n type_list.append(obj.type) \n ry_list.append(obj.ry)\n\n with open('box3d_dimensions.pickle','wb') as fp:\n pickle.dump(type_list, fp)\n pickle.dump(dimension_list, fp)\n pickle.dump(ry_list, fp)\n\ndef read_det_file(det_filename):\n ''' Parse lines in 2D detection output files '''\n det_id2str = {1:'Pedestrian', 2:'Car', 3:'Cyclist'}\n id_list = []\n type_list = []\n prob_list = []\n box2d_list = []\n for line in open(det_filename, 'r'):\n t = line.rstrip().split(\" \")\n id_list.append(int(os.path.basename(t[0]).rstrip('.png')))\n type_list.append(det_id2str[int(t[1])])\n prob_list.append(float(t[2]))\n box2d_list.append(np.array([float(t[i]) for i in range(3,7)]))\n return id_list, type_list, box2d_list, prob_list\n\n \ndef extract_frustum_data_rgb_detection(det_filename, split, output_filename,\n viz=False,\n type_whitelist=['Car'],\n img_height_threshold=25,\n lidar_point_threshold=5):\n ''' Extract point clouds in frustums extruded from 2D detection boxes.\n Update: Lidar points and 3d boxes are in *rect camera* coord system\n (as that in 3d box label files)\n \n Input:\n det_filename: string, each line is\n img_path typeid confidence xmin ymin xmax ymax\n split: string, either trianing or testing\n output_filename: string, the name for output .pickle file\n type_whitelist: a list of strings, object types we are interested in.\n img_height_threshold: int, neglect image with height lower than that.\n lidar_point_threshold: int, neglect frustum with too few points.\n Output:\n None (will write a .pickle file to the disk)\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'), split)\n det_id_list, det_type_list, det_box2d_list, det_prob_list = \\\n read_det_file(det_filename)\n cache_id = -1\n cache = None\n \n id_list = []\n type_list = []\n box2d_list = []\n prob_list = []\n input_list = [] # channel number = 4, xyz,intensity in rect camera coord\n frustum_angle_list = [] # angle of 2d box center from pos x-axis\n\n for det_idx in range(len(det_id_list)):\n data_idx = det_id_list[det_idx]\n print('det idx: %d/%d, data idx: %d' % 
\\\n (det_idx, len(det_id_list), data_idx))\n if cache_id != data_idx:\n calib = dataset.get_calibration(data_idx) # 3 by 4 matrix\n pc_velo = dataset.get_lidar(data_idx)\n pc_rect = np.zeros_like(pc_velo)\n pc_rect[:,0:3] = calib.project_velo_to_rect(pc_velo[:,0:3])\n pc_rect[:,3] = pc_velo[:,3]\n img = dataset.get_image(data_idx)\n img_height, img_width, img_channel = img.shape\n _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(\\\n pc_velo[:,0:3], calib, 0, 0, img_width, img_height, True)\n cache = [calib,pc_rect,pc_image_coord,img_fov_inds]\n cache_id = data_idx\n else:\n calib,pc_rect,pc_image_coord,img_fov_inds = cache\n\n if det_type_list[det_idx] not in type_whitelist: continue\n\n # 2D BOX: Get pts rect backprojected \n xmin,ymin,xmax,ymax = det_box2d_list[det_idx]\n box_fov_inds = (pc_image_coord[:,0]<xmax) & \\\n (pc_image_coord[:,0]>=xmin) & \\\n (pc_image_coord[:,1]<ymax) & \\\n (pc_image_coord[:,1]>=ymin)\n box_fov_inds = box_fov_inds & img_fov_inds\n pc_in_box_fov = pc_rect[box_fov_inds,:]\n # Get frustum angle (according to center pixel in 2D BOX)\n box2d_center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0])\n uvdepth = np.zeros((1,3))\n uvdepth[0,0:2] = box2d_center\n uvdepth[0,2] = 20 # some random depth\n box2d_center_rect = calib.project_image_to_rect(uvdepth)\n frustum_angle = -1 * np.arctan2(box2d_center_rect[0,2],\n box2d_center_rect[0,0])\n \n # Pass objects that are too small\n if ymax-ymin<img_height_threshold or \\\n len(pc_in_box_fov)<lidar_point_threshold:\n continue\n \n id_list.append(data_idx)\n type_list.append(det_type_list[det_idx])\n box2d_list.append(det_box2d_list[det_idx])\n prob_list.append(det_prob_list[det_idx])\n input_list.append(pc_in_box_fov)\n frustum_angle_list.append(frustum_angle)\n \n with open(output_filename,'wb') as fp:\n pickle.dump(id_list, fp)\n pickle.dump(box2d_list,fp)\n pickle.dump(input_list, fp)\n pickle.dump(type_list, fp)\n pickle.dump(frustum_angle_list, fp)\n pickle.dump(prob_list, fp)\n \n if viz:\n import mayavi.mlab as mlab\n for i in range(10):\n p1 = input_list[i]\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,0], p1[:,1], p1[:,2], p1[:,1], mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),\n fgcolor=None, engine=None, size=(500, 500))\n mlab.points3d(p1[:,2], -p1[:,0], -p1[:,1], seg, mode='point',\n colormap='gnuplot', scale_factor=1, figure=fig)\n raw_input()\n\ndef write_2d_rgb_detection(det_filename, split, result_dir):\n ''' Write 2D detection results for KITTI evaluation.\n Convert from Wei's format to KITTI format. 
\n \n Input:\n det_filename: string, each line is\n img_path typeid confidence xmin ymin xmax ymax\n split: string, either trianing or testing\n result_dir: string, folder path for results dumping\n Output:\n None (will write <xxx>.txt files to disk)\n\n Usage:\n write_2d_rgb_detection(\"val_det.txt\", \"training\", \"results\")\n '''\n dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'), split)\n det_id_list, det_type_list, det_box2d_list, det_prob_list = \\\n read_det_file(det_filename)\n # map from idx to list of strings, each string is a line without \\n\n results = {} \n for i in range(len(det_id_list)):\n idx = det_id_list[i]\n typename = det_type_list[i]\n box2d = det_box2d_list[i]\n prob = det_prob_list[i]\n output_str = typename + \" -1 -1 -10 \"\n output_str += \"%f %f %f %f \" % (box2d[0],box2d[1],box2d[2],box2d[3])\n output_str += \"-1 -1 -1 -1000 -1000 -1000 -10 %f\" % (prob)\n if idx not in results: results[idx] = []\n results[idx].append(output_str)\n if not os.path.exists(result_dir): os.mkdir(result_dir)\n output_dir = os.path.join(result_dir, 'data')\n if not os.path.exists(output_dir): os.mkdir(output_dir)\n for idx in results:\n pred_filename = os.path.join(output_dir, '%06d.txt'%(idx))\n fout = open(pred_filename, 'w')\n for line in results[idx]:\n fout.write(line+'\\n')\n fout.close() \n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--demo', action='store_true', help='Run demo.')\n parser.add_argument('--gen_train', action='store_true', help='Generate train split frustum data with perturbed GT 2D boxes')\n parser.add_argument('--gen_val', action='store_true', help='Generate val split frustum data with GT 2D boxes')\n parser.add_argument('--gen_val_rgb_detection', action='store_true', help='Generate val split frustum data with RGB detection 2D boxes')\n parser.add_argument('--car_only', action='store_true', help='Only generate cars; otherwise cars, peds and cycs')\n args = parser.parse_args()\n\n if args.demo:\n demo()\n exit()\n\n if args.car_only:\n type_whitelist = ['Car']\n output_prefix = 'frustum_caronly_'\n else:\n type_whitelist = ['Car', 'Pedestrian', 'Cyclist']\n output_prefix = 'frustum_carpedcyc_'\n\n if args.gen_train:\n extract_frustum_data(\\\n os.path.join(BASE_DIR, 'image_sets/train.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'train.pickle'), \n viz=False, perturb_box2d=True, augmentX=5,\n type_whitelist=type_whitelist)\n\n if args.gen_val:\n extract_frustum_data(\\\n os.path.join(BASE_DIR, 'image_sets/val.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'val.pickle'),\n viz=False, perturb_box2d=False, augmentX=1,\n type_whitelist=type_whitelist)\n\n if args.gen_val_rgb_detection:\n extract_frustum_data_rgb_detection(\\\n os.path.join(BASE_DIR, 'rgb_detections/rgb_detection_val.txt'),\n 'training',\n os.path.join(BASE_DIR, output_prefix+'val_rgb_detection.pickle'),\n viz=False,\n type_whitelist=type_whitelist) \n{\"mode\":\"full\",\"isActive\":false}" ]
[ [ "numpy.array", "numpy.zeros_like", "numpy.zeros", "numpy.sum", "numpy.arctan2", "numpy.random.random", "scipy.spatial.Delaunay" ] ]
vsuomi/fibroid-classification
[ "749e77af4dbd28b00184a9aa9e32b9d891493bd4" ]
[ "scale_features.py" ]
[ "# -*- coding: utf-8 -*-\n'''\nCreated on Thu May 31 11:38:48 2018\n\n@author:\n \n Visa Suomi\n Turku University Hospital\n May 2018\n \n@description:\n \n This function is used to scale features using different scaling types\n \n'''\n\n#%% import necessary packages\n\nimport numpy as np\nimport pandas as pd\n\n#%% define function\n\ndef scale_features(features, scaling):\n \n ''' Scales given features with standard deviation\n \n Args:\n features: pandas Dataframe of features\n scaling: type of scaling: linear ('linear'), logarithmic ('log') or\n z-score ('z-score')\n Returns:\n scaled_features: scaled features\n '''\n \n if scaling == 'linear':\n min_val = features.min()\n max_val = features.max()\n scale = (max_val - min_val) / 2.0\n a = (features - min_val)\n b = scale\n scaled_features = np.divide(a, b, out=np.zeros_like(a), where=b!=0) - 1.0 # NaN to zero - 1\n elif scaling == 'log':\n scaled_features = np.log(features + 1.0)\n elif scaling == 'z-score':\n a = (features - features.mean())\n b = features.std()\n scaled_features = np.divide(a, b, out=np.zeros_like(a), where=b!=0) # NaN to zero\n else:\n print('Unknown scaling type')\n scaled_features = features\n \n scaled_features = pd.DataFrame(scaled_features, columns = list(features), \n index = features.index, dtype = float)\n \n return scaled_features" ]
[ [ "numpy.zeros_like", "numpy.log" ] ]
MaiRajborirug/scikit-learn
[ "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5", "c18d015372f7041099d19c215cd4c36ffd6fe5c5" ]
[ "sklearn/tests/test_config.py", "sklearn/decomposition/_base.py", "examples/multioutput/plot_classifier_chain_yeast.py", "sklearn/metrics/cluster/tests/test_unsupervised.py", "sklearn/manifold/_spectral_embedding.py", "sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py", "sklearn/random_projection.py", "sklearn/discriminant_analysis.py", "sklearn/datasets/_olivetti_faces.py", "examples/ensemble/plot_adaboost_multiclass.py" ]
[ "import time\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom joblib import Parallel\nimport joblib\nimport pytest\n\nfrom sklearn import get_config, set_config, config_context\nfrom sklearn.utils.fixes import delayed\nfrom sklearn.utils.fixes import parse_version\n\n\ndef test_config_context():\n assert get_config() == {\n \"assume_finite\": False,\n \"working_memory\": 1024,\n \"print_changed_only\": True,\n \"display\": \"text\",\n }\n\n # Not using as a context manager affects nothing\n config_context(assume_finite=True)\n assert get_config()[\"assume_finite\"] is False\n\n with config_context(assume_finite=True):\n assert get_config() == {\n \"assume_finite\": True,\n \"working_memory\": 1024,\n \"print_changed_only\": True,\n \"display\": \"text\",\n }\n assert get_config()[\"assume_finite\"] is False\n\n with config_context(assume_finite=True):\n with config_context(assume_finite=None):\n assert get_config()[\"assume_finite\"] is True\n\n assert get_config()[\"assume_finite\"] is True\n\n with config_context(assume_finite=False):\n assert get_config()[\"assume_finite\"] is False\n\n with config_context(assume_finite=None):\n assert get_config()[\"assume_finite\"] is False\n\n # global setting will not be retained outside of context that\n # did not modify this setting\n set_config(assume_finite=True)\n assert get_config()[\"assume_finite\"] is True\n\n assert get_config()[\"assume_finite\"] is False\n\n assert get_config()[\"assume_finite\"] is True\n\n assert get_config() == {\n \"assume_finite\": False,\n \"working_memory\": 1024,\n \"print_changed_only\": True,\n \"display\": \"text\",\n }\n\n # No positional arguments\n with pytest.raises(TypeError):\n config_context(True)\n\n # No unknown arguments\n with pytest.raises(TypeError):\n config_context(do_something_else=True).__enter__()\n\n\ndef test_config_context_exception():\n assert get_config()[\"assume_finite\"] is False\n try:\n with config_context(assume_finite=True):\n assert get_config()[\"assume_finite\"] is True\n raise ValueError()\n except ValueError:\n pass\n assert get_config()[\"assume_finite\"] is False\n\n\ndef test_set_config():\n assert get_config()[\"assume_finite\"] is False\n set_config(assume_finite=None)\n assert get_config()[\"assume_finite\"] is False\n set_config(assume_finite=True)\n assert get_config()[\"assume_finite\"] is True\n set_config(assume_finite=None)\n assert get_config()[\"assume_finite\"] is True\n set_config(assume_finite=False)\n assert get_config()[\"assume_finite\"] is False\n\n # No unknown arguments\n with pytest.raises(TypeError):\n set_config(do_something_else=True)\n\n\ndef set_assume_finite(assume_finite, sleep_duration):\n \"\"\"Return the value of assume_finite after waiting `sleep_duration`.\"\"\"\n with config_context(assume_finite=assume_finite):\n time.sleep(sleep_duration)\n return get_config()[\"assume_finite\"]\n\n\[email protected](\"backend\", [\"loky\", \"multiprocessing\", \"threading\"])\ndef test_config_threadsafe_joblib(backend):\n \"\"\"Test that the global config is threadsafe with all joblib backends.\n Two jobs are spawned and sets assume_finite to two different values.\n When the job with a duration 0.1s completes, the assume_finite value\n should be the same as the value passed to the function. 
In other words,\n it is not influenced by the other job setting assume_finite to True.\n \"\"\"\n\n if parse_version(joblib.__version__) < parse_version(\"0.12\") and backend == \"loky\":\n pytest.skip(\"loky backend does not exist in joblib <0.12\") # noqa\n\n assume_finites = [False, True]\n sleep_durations = [0.1, 0.2]\n\n items = Parallel(backend=backend, n_jobs=2)(\n delayed(set_assume_finite)(assume_finite, sleep_dur)\n for assume_finite, sleep_dur in zip(assume_finites, sleep_durations)\n )\n\n assert items == [False, True]\n\n\ndef test_config_threadsafe():\n \"\"\"Uses threads directly to test that the global config does not change\n between threads. Same test as `test_config_threadsafe_joblib` but with\n `ThreadPoolExecutor`.\"\"\"\n\n assume_finites = [False, True]\n sleep_durations = [0.1, 0.2]\n\n with ThreadPoolExecutor(max_workers=2) as e:\n items = [\n output\n for output in e.map(set_assume_finite, assume_finites, sleep_durations)\n ]\n\n assert items == [False, True]\n", "\"\"\"Principal Component Analysis Base Classes\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Olivier Grisel <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Denis A. Engemann <[email protected]>\n# Kyle Kastner <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin\nfrom ..utils.validation import check_is_fitted\nfrom abc import ABCMeta, abstractmethod\n\n\nclass _BasePCA(\n _ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta\n):\n \"\"\"Base class for PCA methods.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n\n def get_covariance(self):\n \"\"\"Compute data covariance with the generative model.\n\n ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\n where S**2 contains the explained variances, and sigma2 contains the\n noise variances.\n\n Returns\n -------\n cov : array of shape=(n_features, n_features)\n Estimated covariance of data.\n \"\"\"\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)\n cov = np.dot(components_.T * exp_var_diff, components_)\n cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace\n return cov\n\n def get_precision(self):\n \"\"\"Compute data precision matrix with the generative model.\n\n Equals the inverse of the covariance but computed with\n the matrix inversion lemma for efficiency.\n\n Returns\n -------\n precision : array, shape=(n_features, n_features)\n Estimated precision of data.\n \"\"\"\n n_features = self.components_.shape[1]\n\n # handle corner cases first\n if self.n_components_ == 0:\n return np.eye(n_features) / self.noise_variance_\n if self.n_components_ == n_features:\n return linalg.inv(self.get_covariance())\n\n # Get precision using matrix inversion lemma\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)\n precision = np.dot(components_, components_.T) / self.noise_variance_\n precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff\n precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))\n precision /= -(self.noise_variance_ ** 
2)\n precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_\n return precision\n\n @abstractmethod\n def fit(self, X, y=None):\n \"\"\"Placeholder for fit. Subclasses should implement this method!\n\n Fit the model with X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n\n def transform(self, X):\n \"\"\"Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n New data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Projection of X in the first principal components, where `n_samples`\n is the number of samples and `n_components` is the number of the components.\n \"\"\"\n check_is_fitted(self)\n\n X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)\n if self.mean_ is not None:\n X = X - self.mean_\n X_transformed = np.dot(X, self.components_.T)\n if self.whiten:\n X_transformed /= np.sqrt(self.explained_variance_)\n return X_transformed\n\n def inverse_transform(self, X):\n \"\"\"Transform data back to its original space.\n\n In other words, return an input `X_original` whose transform would be X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n New data, where `n_samples` is the number of samples\n and `n_components` is the number of components.\n\n Returns\n -------\n X_original array-like of shape (n_samples, n_features)\n Original data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Notes\n -----\n If whitening is enabled, inverse_transform will compute the\n exact inverse operation, which includes reversing whitening.\n \"\"\"\n if self.whiten:\n return (\n np.dot(\n X,\n np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,\n )\n + self.mean_\n )\n else:\n return np.dot(X, self.components_) + self.mean_\n\n @property\n def _n_features_out(self):\n \"\"\"Number of transformed output features.\"\"\"\n return self.components_.shape[0]\n", "\"\"\"\n============================\nClassifier Chain\n============================\nExample of using classifier chain on a multilabel dataset.\n\nFor this example we will use the `yeast\n<https://www.openml.org/d/40597>`_ dataset which contains\n2417 datapoints each with 103 features and 14 possible labels. Each\ndata point has at least one label. As a baseline we first train a logistic\nregression classifier for each of the 14 labels. To evaluate the performance of\nthese classifiers we predict on a held-out test set and calculate the\n:ref:`jaccard score <jaccard_similarity_score>` for each sample.\n\nNext we create 10 classifier chains. Each classifier chain contains a\nlogistic regression model for each of the 14 labels. The models in each\nchain are ordered randomly. In addition to the 103 features in the dataset,\neach model gets the predictions of the preceding models in the chain as\nfeatures (note that by default at training time each model gets the true\nlabels as features). These additional features allow each chain to exploit\ncorrelations among the classes. 
The Jaccard similarity score for each chain\ntends to be greater than that of the set independent logistic models.\n\nBecause the models in each chain are arranged randomly there is significant\nvariation in performance among the chains. Presumably there is an optimal\nordering of the classes in a chain that will yield the best performance.\nHowever we do not know that ordering a priori. Instead we can construct an\nvoting ensemble of classifier chains by averaging the binary predictions of\nthe chains and apply a threshold of 0.5. The Jaccard similarity score of the\nensemble is greater than that of the independent models and tends to exceed\nthe score of each chain in the ensemble (although this is not guaranteed\nwith randomly ordered chains).\n\n\"\"\"\n\n# Author: Adam Kleczewski\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.multioutput import ClassifierChain\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics import jaccard_score\nfrom sklearn.linear_model import LogisticRegression\n\n# Load a multi-label dataset from https://www.openml.org/d/40597\nX, Y = fetch_openml(\"yeast\", version=4, return_X_y=True)\nY = Y == \"TRUE\"\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n# Fit an independent logistic regression model for each class using the\n# OneVsRestClassifier wrapper.\nbase_lr = LogisticRegression()\novr = OneVsRestClassifier(base_lr)\novr.fit(X_train, Y_train)\nY_pred_ovr = ovr.predict(X_test)\novr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average=\"samples\")\n\n# Fit an ensemble of logistic regression classifier chains and take the\n# take the average prediction of all the chains.\nchains = [ClassifierChain(base_lr, order=\"random\", random_state=i) for i in range(10)]\nfor chain in chains:\n chain.fit(X_train, Y_train)\n\nY_pred_chains = np.array([chain.predict(X_test) for chain in chains])\nchain_jaccard_scores = [\n jaccard_score(Y_test, Y_pred_chain >= 0.5, average=\"samples\")\n for Y_pred_chain in Y_pred_chains\n]\n\nY_pred_ensemble = Y_pred_chains.mean(axis=0)\nensemble_jaccard_score = jaccard_score(\n Y_test, Y_pred_ensemble >= 0.5, average=\"samples\"\n)\n\nmodel_scores = [ovr_jaccard_score] + chain_jaccard_scores\nmodel_scores.append(ensemble_jaccard_score)\n\nmodel_names = (\n \"Independent\",\n \"Chain 1\",\n \"Chain 2\",\n \"Chain 3\",\n \"Chain 4\",\n \"Chain 5\",\n \"Chain 6\",\n \"Chain 7\",\n \"Chain 8\",\n \"Chain 9\",\n \"Chain 10\",\n \"Ensemble\",\n)\n\nx_pos = np.arange(len(model_names))\n\n# Plot the Jaccard similarity scores for the independent model, each of the\n# chains, and the ensemble (note that the vertical axis on this plot does\n# not begin at 0).\n\nfig, ax = plt.subplots(figsize=(7, 4))\nax.grid(True)\nax.set_title(\"Classifier Chain Ensemble Performance Comparison\")\nax.set_xticks(x_pos)\nax.set_xticklabels(model_names, rotation=\"vertical\")\nax.set_ylabel(\"Jaccard Similarity Score\")\nax.set_ylim([min(model_scores) * 0.9, max(model_scores) * 1.1])\ncolors = [\"r\"] + [\"b\"] * len(chain_jaccard_scores) + [\"g\"]\nax.bar(x_pos, model_scores, alpha=0.5, color=colors)\nplt.tight_layout()\nplt.show()\n", "import numpy as np\nimport scipy.sparse as sp\nimport pytest\nfrom scipy.sparse import csr_matrix\n\nfrom sklearn import datasets\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.metrics.cluster import 
silhouette_score\nfrom sklearn.metrics.cluster import silhouette_samples\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.metrics.cluster import calinski_harabasz_score\nfrom sklearn.metrics.cluster import davies_bouldin_score\n\n\ndef test_silhouette():\n # Tests the Silhouette Coefficient.\n dataset = datasets.load_iris()\n X_dense = dataset.data\n X_csr = csr_matrix(X_dense)\n X_dok = sp.dok_matrix(X_dense)\n X_lil = sp.lil_matrix(X_dense)\n y = dataset.target\n\n for X in [X_dense, X_csr, X_dok, X_lil]:\n D = pairwise_distances(X, metric=\"euclidean\")\n # Given that the actual labels are used, we can assume that S would be\n # positive.\n score_precomputed = silhouette_score(D, y, metric=\"precomputed\")\n assert score_precomputed > 0\n # Test without calculating D\n score_euclidean = silhouette_score(X, y, metric=\"euclidean\")\n pytest.approx(score_precomputed, score_euclidean)\n\n if X is X_dense:\n score_dense_without_sampling = score_precomputed\n else:\n pytest.approx(score_euclidean, score_dense_without_sampling)\n\n # Test with sampling\n score_precomputed = silhouette_score(\n D, y, metric=\"precomputed\", sample_size=int(X.shape[0] / 2), random_state=0\n )\n score_euclidean = silhouette_score(\n X, y, metric=\"euclidean\", sample_size=int(X.shape[0] / 2), random_state=0\n )\n assert score_precomputed > 0\n assert score_euclidean > 0\n pytest.approx(score_euclidean, score_precomputed)\n\n if X is X_dense:\n score_dense_with_sampling = score_precomputed\n else:\n pytest.approx(score_euclidean, score_dense_with_sampling)\n\n\ndef test_cluster_size_1():\n # Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster\n # (cluster 0). We also test the case where there are identical samples\n # as the only members of a cluster (cluster 2). 
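# A minimal worked example (added for illustration; the toy points are assumptions):
# the per-sample silhouette value is s = (b - a) / max(a, b), where a is the mean
# intra-cluster distance and b the mean distance to the nearest other cluster.
import numpy as np
from sklearn.metrics import silhouette_samples

X_toy = np.array([[0.0], [0.2], [3.0], [3.1]])
labels_toy = np.array([0, 0, 1, 1])
# For sample 0: a is the distance to the only other member of its cluster,
# b is the mean distance to the two samples of the other cluster.
a = 0.2
b = (3.0 + 3.1) / 2
assert np.isclose(silhouette_samples(X_toy, labels_toy)[0], (b - a) / max(a, b))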
To our knowledge, this case\n # is not discussed in reference material, and we choose for it a sample\n # score of 1.\n X = [[0.0], [1.0], [1.0], [2.0], [3.0], [3.0]]\n labels = np.array([0, 1, 1, 1, 2, 2])\n\n # Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention\n # Cluster 1: intra-cluster = [.5, .5, 1]\n # inter-cluster = [1, 1, 1]\n # silhouette = [.5, .5, 0]\n # Cluster 2: intra-cluster = [0, 0]\n # inter-cluster = [arbitrary, arbitrary]\n # silhouette = [1., 1.]\n\n silhouette = silhouette_score(X, labels)\n assert not np.isnan(silhouette)\n ss = silhouette_samples(X, labels)\n assert_array_equal(ss, [0, 0.5, 0.5, 0, 1, 1])\n\n\ndef test_silhouette_paper_example():\n # Explicitly check per-sample results against Rousseeuw (1987)\n # Data from Table 1\n lower = [\n 5.58,\n 7.00,\n 6.50,\n 7.08,\n 7.00,\n 3.83,\n 4.83,\n 5.08,\n 8.17,\n 5.83,\n 2.17,\n 5.75,\n 6.67,\n 6.92,\n 4.92,\n 6.42,\n 5.00,\n 5.58,\n 6.00,\n 4.67,\n 6.42,\n 3.42,\n 5.50,\n 6.42,\n 6.42,\n 5.00,\n 3.92,\n 6.17,\n 2.50,\n 4.92,\n 6.25,\n 7.33,\n 4.50,\n 2.25,\n 6.33,\n 2.75,\n 6.08,\n 6.67,\n 4.25,\n 2.67,\n 6.00,\n 6.17,\n 6.17,\n 6.92,\n 6.17,\n 5.25,\n 6.83,\n 4.50,\n 3.75,\n 5.75,\n 5.42,\n 6.08,\n 5.83,\n 6.67,\n 3.67,\n 4.75,\n 3.00,\n 6.08,\n 6.67,\n 5.00,\n 5.58,\n 4.83,\n 6.17,\n 5.67,\n 6.50,\n 6.92,\n ]\n D = np.zeros((12, 12))\n D[np.tril_indices(12, -1)] = lower\n D += D.T\n\n names = [\n \"BEL\",\n \"BRA\",\n \"CHI\",\n \"CUB\",\n \"EGY\",\n \"FRA\",\n \"IND\",\n \"ISR\",\n \"USA\",\n \"USS\",\n \"YUG\",\n \"ZAI\",\n ]\n\n # Data from Figure 2\n labels1 = [1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 1]\n expected1 = {\n \"USA\": 0.43,\n \"BEL\": 0.39,\n \"FRA\": 0.35,\n \"ISR\": 0.30,\n \"BRA\": 0.22,\n \"EGY\": 0.20,\n \"ZAI\": 0.19,\n \"CUB\": 0.40,\n \"USS\": 0.34,\n \"CHI\": 0.33,\n \"YUG\": 0.26,\n \"IND\": -0.04,\n }\n score1 = 0.28\n\n # Data from Figure 3\n labels2 = [1, 2, 3, 3, 1, 1, 2, 1, 1, 3, 3, 2]\n expected2 = {\n \"USA\": 0.47,\n \"FRA\": 0.44,\n \"BEL\": 0.42,\n \"ISR\": 0.37,\n \"EGY\": 0.02,\n \"ZAI\": 0.28,\n \"BRA\": 0.25,\n \"IND\": 0.17,\n \"CUB\": 0.48,\n \"USS\": 0.44,\n \"YUG\": 0.31,\n \"CHI\": 0.31,\n }\n score2 = 0.33\n\n for labels, expected, score in [\n (labels1, expected1, score1),\n (labels2, expected2, score2),\n ]:\n expected = [expected[name] for name in names]\n # we check to 2dp because that's what's in the paper\n pytest.approx(\n expected,\n silhouette_samples(D, np.array(labels), metric=\"precomputed\"),\n abs=1e-2,\n )\n pytest.approx(\n score, silhouette_score(D, np.array(labels), metric=\"precomputed\"), abs=1e-2\n )\n\n\ndef test_correct_labelsize():\n # Assert 1 < n_labels < n_samples\n dataset = datasets.load_iris()\n X = dataset.data\n\n # n_labels = n_samples\n y = np.arange(X.shape[0])\n err_msg = (\n r\"Number of labels is %d\\. Valid values are 2 \"\n r\"to n_samples - 1 \\(inclusive\\)\" % len(np.unique(y))\n )\n with pytest.raises(ValueError, match=err_msg):\n silhouette_score(X, y)\n\n # n_labels = 1\n y = np.zeros(X.shape[0])\n err_msg = (\n r\"Number of labels is %d\\. 
Valid values are 2 \"\n r\"to n_samples - 1 \\(inclusive\\)\" % len(np.unique(y))\n )\n with pytest.raises(ValueError, match=err_msg):\n silhouette_score(X, y)\n\n\ndef test_non_encoded_labels():\n dataset = datasets.load_iris()\n X = dataset.data\n labels = dataset.target\n assert silhouette_score(X, labels * 2 + 10) == silhouette_score(X, labels)\n assert_array_equal(\n silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels)\n )\n\n\ndef test_non_numpy_labels():\n dataset = datasets.load_iris()\n X = dataset.data\n y = dataset.target\n assert silhouette_score(list(X), list(y)) == silhouette_score(X, y)\n\n\[email protected](\"dtype\", (np.float32, np.float64))\ndef test_silhouette_nonzero_diag(dtype):\n # Make sure silhouette_samples requires diagonal to be zero.\n # Non-regression test for #12178\n\n # Construct a zero-diagonal matrix\n dists = pairwise_distances(\n np.array([[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]], dtype=dtype).T\n )\n labels = [0, 0, 0, 1, 1, 1]\n\n # small values on the diagonal are OK\n dists[2][2] = np.finfo(dists.dtype).eps * 10\n silhouette_samples(dists, labels, metric=\"precomputed\")\n\n # values bigger than eps * 100 are not\n dists[2][2] = np.finfo(dists.dtype).eps * 1000\n with pytest.raises(ValueError, match=\"contains non-zero\"):\n silhouette_samples(dists, labels, metric=\"precomputed\")\n\n\ndef assert_raises_on_only_one_label(func):\n \"\"\"Assert message when there is only one label\"\"\"\n rng = np.random.RandomState(seed=0)\n with pytest.raises(ValueError, match=\"Number of labels is\"):\n func(rng.rand(10, 2), np.zeros(10))\n\n\ndef assert_raises_on_all_points_same_cluster(func):\n \"\"\"Assert message when all point are in different clusters\"\"\"\n rng = np.random.RandomState(seed=0)\n with pytest.raises(ValueError, match=\"Number of labels is\"):\n func(rng.rand(10, 2), np.arange(10))\n\n\ndef test_calinski_harabasz_score():\n assert_raises_on_only_one_label(calinski_harabasz_score)\n\n assert_raises_on_all_points_same_cluster(calinski_harabasz_score)\n\n # Assert the value is 1. when all samples are equals\n assert 1.0 == calinski_harabasz_score(np.ones((10, 2)), [0] * 5 + [1] * 5)\n\n # Assert the value is 0. when all the mean cluster are equal\n assert 0.0 == calinski_harabasz_score([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10)\n\n # General case (with non numpy arrays)\n X = (\n [[0, 0], [1, 1]] * 5\n + [[3, 3], [4, 4]] * 5\n + [[0, 4], [1, 3]] * 5\n + [[3, 1], [4, 0]] * 5\n )\n labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10\n pytest.approx(calinski_harabasz_score(X, labels), 45 * (40 - 4) / (5 * (4 - 1)))\n\n\ndef test_davies_bouldin_score():\n assert_raises_on_only_one_label(davies_bouldin_score)\n assert_raises_on_all_points_same_cluster(davies_bouldin_score)\n\n # Assert the value is 0. when all samples are equals\n assert davies_bouldin_score(np.ones((10, 2)), [0] * 5 + [1] * 5) == pytest.approx(\n 0.0\n )\n\n # Assert the value is 0. 
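# A short worked check (added for illustration, not part of the original tests) of the
# expected Calinski-Harabasz value above: the four cluster means are (0.5, 0.5),
# (3.5, 3.5), (0.5, 3.5) and (3.5, 0.5) and the overall mean is (2, 2), so the
# between-group dispersion is 4 * 10 * (1.5**2 + 1.5**2) = 180 while the within-group
# dispersion is 4 * 5 = 20. With n=40 samples and k=4 clusters this gives
# CH = (180 / (4 - 1)) / (20 / (40 - 4)) = 108 = 45 * (40 - 4) / (5 * (4 - 1)).
import numpy as np
from sklearn.metrics import calinski_harabasz_score

X_ch = np.array(
    [[0, 0], [1, 1]] * 5
    + [[3, 3], [4, 4]] * 5
    + [[0, 4], [1, 3]] * 5
    + [[3, 1], [4, 0]] * 5
)
labels_ch = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
assert np.isclose(calinski_harabasz_score(X_ch, labels_ch), 108.0)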
when all the mean cluster are equal\n assert davies_bouldin_score(\n [[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10\n ) == pytest.approx(0.0)\n\n # General case (with non numpy arrays)\n X = (\n [[0, 0], [1, 1]] * 5\n + [[3, 3], [4, 4]] * 5\n + [[0, 4], [1, 3]] * 5\n + [[3, 1], [4, 0]] * 5\n )\n labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10\n pytest.approx(davies_bouldin_score(X, labels), 2 * np.sqrt(0.5) / 3)\n\n # Ensure divide by zero warning is not raised in general case\n with pytest.warns(None) as record:\n davies_bouldin_score(X, labels)\n div_zero_warnings = [\n warning\n for warning in record\n if \"divide by zero encountered\" in warning.message.args[0]\n ]\n assert len(div_zero_warnings) == 0\n\n # General case - cluster have one sample\n X = [[0, 0], [2, 2], [3, 3], [5, 5]]\n labels = [0, 0, 1, 2]\n pytest.approx(davies_bouldin_score(X, labels), (5.0 / 4) / 3)\n", "\"\"\"Spectral Embedding.\"\"\"\n\n# Author: Gael Varoquaux <[email protected]>\n# Wei LI <[email protected]>\n# License: BSD 3 clause\n\n\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.linalg import eigh\nfrom scipy.sparse.linalg import eigsh\nfrom scipy.sparse.csgraph import connected_components\nfrom scipy.sparse.csgraph import laplacian as csgraph_laplacian\n\nfrom ..base import BaseEstimator\nfrom ..utils import (\n check_array,\n check_random_state,\n check_symmetric,\n)\nfrom ..utils._arpack import _init_arpack_v0\nfrom ..utils.extmath import _deterministic_vector_sign_flip\nfrom ..utils.fixes import lobpcg\nfrom ..metrics.pairwise import rbf_kernel\nfrom ..neighbors import kneighbors_graph, NearestNeighbors\nfrom ..utils.deprecation import deprecated\n\n\ndef _graph_connected_component(graph, node_id):\n \"\"\"Find the largest graph connected components that contains one\n given node.\n\n Parameters\n ----------\n graph : array-like of shape (n_samples, n_samples)\n Adjacency matrix of the graph, non-zero weight means an edge\n between the nodes.\n\n node_id : int\n The index of the query node of the graph.\n\n Returns\n -------\n connected_components_matrix : array-like of shape (n_samples,)\n An array of bool value indicating the indexes of the nodes\n belonging to the largest connected components of the given query\n node.\n \"\"\"\n n_node = graph.shape[0]\n if sparse.issparse(graph):\n # speed up row-wise access to boolean connection mask\n graph = graph.tocsr()\n connected_nodes = np.zeros(n_node, dtype=bool)\n nodes_to_explore = np.zeros(n_node, dtype=bool)\n nodes_to_explore[node_id] = True\n for _ in range(n_node):\n last_num_component = connected_nodes.sum()\n np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)\n if last_num_component >= connected_nodes.sum():\n break\n indices = np.where(nodes_to_explore)[0]\n nodes_to_explore.fill(False)\n for i in indices:\n if sparse.issparse(graph):\n neighbors = graph[i].toarray().ravel()\n else:\n neighbors = graph[i]\n np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)\n return connected_nodes\n\n\ndef _graph_is_connected(graph):\n \"\"\"Return whether the graph is connected (True) or Not (False).\n\n Parameters\n ----------\n graph : {array-like, sparse matrix} of shape (n_samples, n_samples)\n Adjacency matrix of the graph, non-zero weight means an edge\n between the nodes.\n\n Returns\n -------\n is_connected : bool\n True means the graph is fully connected and False means not.\n \"\"\"\n if sparse.isspmatrix(graph):\n # sparse graph, find all the connected components\n 
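        # connected_components labels every node with the id of its connected
        # component; the graph is fully connected exactly when there is a
        # single such component.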
n_connected_components, _ = connected_components(graph)\n return n_connected_components == 1\n else:\n # dense graph, find all connected components start from node 0\n return _graph_connected_component(graph, 0).sum() == graph.shape[0]\n\n\ndef _set_diag(laplacian, value, norm_laplacian):\n \"\"\"Set the diagonal of the laplacian matrix and convert it to a\n sparse format well suited for eigenvalue decomposition.\n\n Parameters\n ----------\n laplacian : {ndarray, sparse matrix}\n The graph laplacian.\n\n value : float\n The value of the diagonal.\n\n norm_laplacian : bool\n Whether the value of the diagonal should be changed or not.\n\n Returns\n -------\n laplacian : {array, sparse matrix}\n An array of matrix in a form that is well suited to fast\n eigenvalue decomposition, depending on the band width of the\n matrix.\n \"\"\"\n n_nodes = laplacian.shape[0]\n # We need all entries in the diagonal to values\n if not sparse.isspmatrix(laplacian):\n if norm_laplacian:\n laplacian.flat[:: n_nodes + 1] = value\n else:\n laplacian = laplacian.tocoo()\n if norm_laplacian:\n diag_idx = laplacian.row == laplacian.col\n laplacian.data[diag_idx] = value\n # If the matrix has a small number of diagonals (as in the\n # case of structured matrices coming from images), the\n # dia format might be best suited for matvec products:\n n_diags = np.unique(laplacian.row - laplacian.col).size\n if n_diags <= 7:\n # 3 or less outer diagonals on each side\n laplacian = laplacian.todia()\n else:\n # csr has the fastest matvec and is thus best suited to\n # arpack\n laplacian = laplacian.tocsr()\n return laplacian\n\n\ndef spectral_embedding(\n adjacency,\n *,\n n_components=8,\n eigen_solver=None,\n random_state=None,\n eigen_tol=0.0,\n norm_laplacian=True,\n drop_first=True,\n):\n \"\"\"Project the sample on the first eigenvectors of the graph Laplacian.\n\n The adjacency matrix is used to compute a normalized graph Laplacian\n whose spectrum (especially the eigenvectors associated to the\n smallest eigenvalues) has an interpretation in terms of minimal\n number of cuts necessary to split the graph into comparably sized\n components.\n\n This embedding can also 'work' even if the ``adjacency`` variable is\n not strictly the adjacency matrix of a graph but more generally\n an affinity or similarity matrix between samples (for instance the\n heat kernel of a euclidean distance matrix or a k-NN matrix).\n\n However care must taken to always make the affinity matrix symmetric\n so that the eigenvector decomposition works as expected.\n\n Note : Laplacian Eigenmaps is the actual algorithm implemented here.\n\n Read more in the :ref:`User Guide <spectral_embedding>`.\n\n Parameters\n ----------\n adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)\n The adjacency matrix of the graph to embed.\n\n n_components : int, default=8\n The dimension of the projection subspace.\n\n eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None\n The eigenvalue decomposition strategy to use. AMG requires pyamg\n to be installed. It can be faster on very large, sparse problems,\n but may also lead to instabilities. If None, then ``'arpack'`` is\n used.\n\n random_state : int, RandomState instance or None, default=None\n A pseudo random number generator used for the initialization\n of the lobpcg eigen vectors decomposition when `eigen_solver ==\n 'amg'`, and for the K-Means initialization. Use an int to make\n the results deterministic across calls (See\n :term:`Glossary <random_state>`).\n\n .. 
note::\n When using `eigen_solver == 'amg'`,\n it is necessary to also fix the global numpy seed with\n `np.random.seed(int)` to get deterministic results. See\n https://github.com/pyamg/pyamg/issues/139 for further\n information.\n\n eigen_tol : float, default=0.0\n Stopping criterion for eigendecomposition of the Laplacian matrix\n when using arpack eigen_solver.\n\n norm_laplacian : bool, default=True\n If True, then compute symmetric normalized Laplacian.\n\n drop_first : bool, default=True\n Whether to drop the first eigenvector. For spectral embedding, this\n should be True as the first eigenvector should be constant vector for\n connected graph, but for spectral clustering, this should be kept as\n False to retain the first eigenvector.\n\n Returns\n -------\n embedding : ndarray of shape (n_samples, n_components)\n The reduced samples.\n\n Notes\n -----\n Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph\n has one connected component. If there graph has many components, the first\n few eigenvectors will simply uncover the connected components of the graph.\n\n References\n ----------\n * https://en.wikipedia.org/wiki/LOBPCG\n\n * Toward the Optimal Preconditioned Eigensolver: Locally Optimal\n Block Preconditioned Conjugate Gradient Method\n Andrew V. Knyazev\n https://doi.org/10.1137%2FS1064827500366124\n \"\"\"\n adjacency = check_symmetric(adjacency)\n\n try:\n from pyamg import smoothed_aggregation_solver\n except ImportError as e:\n if eigen_solver == \"amg\":\n raise ValueError(\n \"The eigen_solver was set to 'amg', but pyamg is not available.\"\n ) from e\n\n if eigen_solver is None:\n eigen_solver = \"arpack\"\n elif eigen_solver not in (\"arpack\", \"lobpcg\", \"amg\"):\n raise ValueError(\n \"Unknown value for eigen_solver: '%s'.\"\n \"Should be 'amg', 'arpack', or 'lobpcg'\" % eigen_solver\n )\n\n random_state = check_random_state(random_state)\n\n n_nodes = adjacency.shape[0]\n # Whether to drop the first eigenvector\n if drop_first:\n n_components = n_components + 1\n\n if not _graph_is_connected(adjacency):\n warnings.warn(\n \"Graph is not fully connected, spectral embedding may not work as expected.\"\n )\n\n laplacian, dd = csgraph_laplacian(\n adjacency, normed=norm_laplacian, return_diag=True\n )\n if (\n eigen_solver == \"arpack\"\n or eigen_solver != \"lobpcg\"\n and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)\n ):\n # lobpcg used with eigen_solver='amg' has bugs for low number of nodes\n # for details see the source code in scipy:\n # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen\n # /lobpcg/lobpcg.py#L237\n # or matlab:\n # https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\n\n # Here we'll use shift-invert mode for fast eigenvalues\n # (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html\n # for a short explanation of what this means)\n # Because the normalized Laplacian has eigenvalues between 0 and 2,\n # I - L has eigenvalues between -1 and 1. ARPACK is most efficient\n # when finding eigenvalues of largest magnitude (keyword which='LM')\n # and when these eigenvalues are very large compared to the rest.\n # For very large, very sparse graphs, I - L can have many, many\n # eigenvalues very near 1.0. This leads to slow convergence. So\n # instead, we'll use ARPACK's shift-invert mode, asking for the\n # eigenvalues near 1.0. 
This effectively spreads-out the spectrum\n # near 1.0 and leads to much faster convergence: potentially an\n # orders-of-magnitude speedup over simply using keyword which='LA'\n # in standard mode.\n try:\n # We are computing the opposite of the laplacian inplace so as\n # to spare a memory allocation of a possibly very large array\n laplacian *= -1\n v0 = _init_arpack_v0(laplacian.shape[0], random_state)\n _, diffusion_map = eigsh(\n laplacian, k=n_components, sigma=1.0, which=\"LM\", tol=eigen_tol, v0=v0\n )\n embedding = diffusion_map.T[n_components::-1]\n if norm_laplacian:\n # recover u = D^-1/2 x from the eigenvector output x\n embedding = embedding / dd\n except RuntimeError:\n # When submatrices are exactly singular, an LU decomposition\n # in arpack fails. We fallback to lobpcg\n eigen_solver = \"lobpcg\"\n # Revert the laplacian to its opposite to have lobpcg work\n laplacian *= -1\n\n elif eigen_solver == \"amg\":\n # Use AMG to get a preconditioner and speed up the eigenvalue\n # problem.\n if not sparse.issparse(laplacian):\n warnings.warn(\"AMG works better for sparse matrices\")\n laplacian = check_array(\n laplacian, dtype=[np.float64, np.float32], accept_sparse=True\n )\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\n\n # The Laplacian matrix is always singular, having at least one zero\n # eigenvalue, corresponding to the trivial eigenvector, which is a\n # constant. Using a singular matrix for preconditioning may result in\n # random failures in LOBPCG and is not supported by the existing\n # theory:\n # see https://doi.org/10.1007/s10208-015-9297-1\n # Shift the Laplacian so its diagononal is not all ones. The shift\n # does change the eigenpairs however, so we'll feed the shifted\n # matrix to the solver and afterward set it back to the original.\n diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])\n laplacian += diag_shift\n ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse=\"csr\"))\n laplacian -= diag_shift\n\n M = ml.aspreconditioner()\n # Create initial approximation X to eigenvectors\n X = random_state.rand(laplacian.shape[0], n_components + 1)\n X[:, 0] = dd.ravel()\n X = X.astype(laplacian.dtype)\n _, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.0e-5, largest=False)\n embedding = diffusion_map.T\n if norm_laplacian:\n # recover u = D^-1/2 x from the eigenvector output x\n embedding = embedding / dd\n if embedding.shape[0] == 1:\n raise ValueError\n\n if eigen_solver == \"lobpcg\":\n laplacian = check_array(\n laplacian, dtype=[np.float64, np.float32], accept_sparse=True\n )\n if n_nodes < 5 * n_components + 1:\n # see note above under arpack why lobpcg has problems with small\n # number of nodes\n # lobpcg will fallback to eigh, so we short circuit it\n if sparse.isspmatrix(laplacian):\n laplacian = laplacian.toarray()\n _, diffusion_map = eigh(laplacian, check_finite=False)\n embedding = diffusion_map.T[:n_components]\n if norm_laplacian:\n # recover u = D^-1/2 x from the eigenvector output x\n embedding = embedding / dd\n else:\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\n # We increase the number of eigenvectors requested, as lobpcg\n # doesn't behave well in low dimension and create initial\n # approximation X to eigenvectors\n X = random_state.rand(laplacian.shape[0], n_components + 1)\n X[:, 0] = dd.ravel()\n X = X.astype(laplacian.dtype)\n _, diffusion_map = lobpcg(\n laplacian, X, tol=1e-5, largest=False, maxiter=2000\n )\n embedding = diffusion_map.T[:n_components]\n if norm_laplacian:\n # recover u = D^-1/2 x 
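# A rough standalone sketch (added for illustration; the tiny affinity matrix is an
# assumption) of what the dense eigh fallback above computes: take the eigenvectors of
# the normalized graph Laplacian associated with the smallest eigenvalues and rescale
# them by D^-1/2.
import numpy as np
from scipy.linalg import eigh
from scipy.sparse.csgraph import laplacian as csgraph_laplacian

affinity = np.array(
    [[0.0, 1.0, 1.0, 0.0],
     [1.0, 0.0, 1.0, 0.0],
     [1.0, 1.0, 0.0, 1.0],
     [0.0, 0.0, 1.0, 0.0]]
)
lap, dd = csgraph_laplacian(affinity, normed=True, return_diag=True)
_, eigenvectors = eigh(lap)  # eigh returns eigenvalues in ascending order
embedding = (eigenvectors[:, :2] / dd[:, np.newaxis]).T  # recover u = D^-1/2 x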
from the eigenvector output x\n embedding = embedding / dd\n if embedding.shape[0] == 1:\n raise ValueError\n\n embedding = _deterministic_vector_sign_flip(embedding)\n if drop_first:\n return embedding[1:n_components].T\n else:\n return embedding[:n_components].T\n\n\nclass SpectralEmbedding(BaseEstimator):\n \"\"\"Spectral embedding for non-linear dimensionality reduction.\n\n Forms an affinity matrix given by the specified function and\n applies spectral decomposition to the corresponding graph laplacian.\n The resulting transformation is given by the value of the\n eigenvectors for each data point.\n\n Note : Laplacian Eigenmaps is the actual algorithm implemented here.\n\n Read more in the :ref:`User Guide <spectral_embedding>`.\n\n Parameters\n ----------\n n_components : int, default=2\n The dimension of the projected subspace.\n\n affinity : {'nearest_neighbors', 'rbf', 'precomputed', \\\n 'precomputed_nearest_neighbors'} or callable, \\\n default='nearest_neighbors'\n How to construct the affinity matrix.\n - 'nearest_neighbors' : construct the affinity matrix by computing a\n graph of nearest neighbors.\n - 'rbf' : construct the affinity matrix by computing a radial basis\n function (RBF) kernel.\n - 'precomputed' : interpret ``X`` as a precomputed affinity matrix.\n - 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph\n of precomputed nearest neighbors, and constructs the affinity matrix\n by selecting the ``n_neighbors`` nearest neighbors.\n - callable : use passed in function as affinity\n the function takes in data matrix (n_samples, n_features)\n and return affinity matrix (n_samples, n_samples).\n\n gamma : float, default=None\n Kernel coefficient for rbf kernel. If None, gamma will be set to\n 1/n_features.\n\n random_state : int, RandomState instance or None, default=None\n A pseudo random number generator used for the initialization\n of the lobpcg eigen vectors decomposition when `eigen_solver ==\n 'amg'`, and for the K-Means initialization. Use an int to make\n the results deterministic across calls (See\n :term:`Glossary <random_state>`).\n\n .. note::\n When using `eigen_solver == 'amg'`,\n it is necessary to also fix the global numpy seed with\n `np.random.seed(int)` to get deterministic results. See\n https://github.com/pyamg/pyamg/issues/139 for further\n information.\n\n eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None\n The eigenvalue decomposition strategy to use. AMG requires pyamg\n to be installed. It can be faster on very large, sparse problems.\n If None, then ``'arpack'`` is used.\n\n n_neighbors : int, default=None\n Number of nearest neighbors for nearest_neighbors graph building.\n If None, n_neighbors will be set to max(n_samples/10, 1).\n\n n_jobs : int, default=None\n The number of parallel jobs to run.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n Attributes\n ----------\n embedding_ : ndarray of shape (n_samples, n_components)\n Spectral embedding of the training matrix.\n\n affinity_matrix_ : ndarray of shape (n_samples, n_samples)\n Affinity_matrix constructed from samples or precomputed.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n n_neighbors_ : int\n Number of nearest neighbors effectively used.\n\n See Also\n --------\n Isomap : Non-linear dimensionality reduction through Isometric Mapping.\n\n References\n ----------\n\n - A Tutorial on Spectral Clustering, 2007\n Ulrike von Luxburg\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323\n\n - On Spectral Clustering: Analysis and an algorithm, 2001\n Andrew Y. Ng, Michael I. Jordan, Yair Weiss\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100\n\n - Normalized cuts and image segmentation, 2000\n Jianbo Shi, Jitendra Malik\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.manifold import SpectralEmbedding\n >>> X, _ = load_digits(return_X_y=True)\n >>> X.shape\n (1797, 64)\n >>> embedding = SpectralEmbedding(n_components=2)\n >>> X_transformed = embedding.fit_transform(X[:100])\n >>> X_transformed.shape\n (100, 2)\n \"\"\"\n\n def __init__(\n self,\n n_components=2,\n *,\n affinity=\"nearest_neighbors\",\n gamma=None,\n random_state=None,\n eigen_solver=None,\n n_neighbors=None,\n n_jobs=None,\n ):\n self.n_components = n_components\n self.affinity = affinity\n self.gamma = gamma\n self.random_state = random_state\n self.eigen_solver = eigen_solver\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n def _more_tags(self):\n return {\n \"pairwise\": self.affinity\n in [\"precomputed\", \"precomputed_nearest_neighbors\"]\n }\n\n # TODO: Remove in 1.1\n # mypy error: Decorated property not supported\n @deprecated( # type: ignore\n \"Attribute `_pairwise` was deprecated in \"\n \"version 0.24 and will be removed in 1.1 (renaming of 0.26).\"\n )\n @property\n def _pairwise(self):\n return self.affinity in [\"precomputed\", \"precomputed_nearest_neighbors\"]\n\n def _get_affinity_matrix(self, X, Y=None):\n \"\"\"Calculate the affinity matrix from data\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n If affinity is \"precomputed\"\n X : array-like of shape (n_samples, n_samples),\n Interpret X as precomputed adjacency graph computed from\n samples.\n\n Y: Ignored\n\n Returns\n -------\n affinity_matrix of shape (n_samples, n_samples)\n \"\"\"\n if self.affinity == \"precomputed\":\n self.affinity_matrix_ = X\n return self.affinity_matrix_\n if self.affinity == \"precomputed_nearest_neighbors\":\n estimator = NearestNeighbors(\n n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric=\"precomputed\"\n ).fit(X)\n connectivity = estimator.kneighbors_graph(X=X, mode=\"connectivity\")\n self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)\n return self.affinity_matrix_\n if self.affinity == \"nearest_neighbors\":\n if sparse.issparse(X):\n warnings.warn(\n \"Nearest neighbors affinity currently does \"\n \"not support sparse input, falling back to \"\n \"rbf affinity\"\n )\n self.affinity = \"rbf\"\n else:\n self.n_neighbors_ = (\n self.n_neighbors\n if self.n_neighbors is not None\n else max(int(X.shape[0] / 10), 1)\n )\n self.affinity_matrix_ = kneighbors_graph(\n X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs\n )\n # currently only symmetric affinity_matrix supported\n self.affinity_matrix_ = 0.5 * (\n self.affinity_matrix_ + self.affinity_matrix_.T\n )\n return self.affinity_matrix_\n if self.affinity == \"rbf\":\n self.gamma_ = self.gamma if self.gamma 
is not None else 1.0 / X.shape[1]\n self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)\n return self.affinity_matrix_\n self.affinity_matrix_ = self.affinity(X)\n return self.affinity_matrix_\n\n def fit(self, X, y=None):\n \"\"\"Fit the model from data in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n If affinity is \"precomputed\"\n X : {array-like, sparse matrix}, shape (n_samples, n_samples),\n Interpret X as precomputed adjacency graph computed from\n samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n\n X = self._validate_data(X, accept_sparse=\"csr\", ensure_min_samples=2)\n\n random_state = check_random_state(self.random_state)\n if isinstance(self.affinity, str):\n if self.affinity not in {\n \"nearest_neighbors\",\n \"rbf\",\n \"precomputed\",\n \"precomputed_nearest_neighbors\",\n }:\n raise ValueError(\n \"%s is not a valid affinity. Expected \"\n \"'precomputed', 'rbf', 'nearest_neighbors' \"\n \"or a callable.\"\n % self.affinity\n )\n elif not callable(self.affinity):\n raise ValueError(\n \"'affinity' is expected to be an affinity name or a callable. Got: %s\"\n % self.affinity\n )\n\n affinity_matrix = self._get_affinity_matrix(X)\n self.embedding_ = spectral_embedding(\n affinity_matrix,\n n_components=self.n_components,\n eigen_solver=self.eigen_solver,\n random_state=random_state,\n )\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model from data in X and transform X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n If affinity is \"precomputed\"\n X : {array-like, sparse matrix} of shape (n_samples, n_samples),\n Interpret X as precomputed adjacency graph computed from\n samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Spectral embedding of the training matrix.\n \"\"\"\n self.fit(X)\n return self.embedding_\n", "import numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose, assert_array_equal\nfrom sklearn.datasets import make_classification, make_regression\nfrom sklearn.datasets import make_low_rank_matrix\nfrom sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.base import clone, BaseEstimator, TransformerMixin\nfrom sklearn.base import is_regressor\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.metrics import mean_poisson_deviance\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.compose import make_column_transformer\n\nfrom sklearn.ensemble import HistGradientBoostingRegressor\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.ensemble._hist_gradient_boosting.loss import _LOSSES\nfrom sklearn.ensemble._hist_gradient_boosting.loss import LeastSquares\nfrom sklearn.ensemble._hist_gradient_boosting.loss import BinaryCrossEntropy\nfrom sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower\nfrom sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper\nfrom sklearn.utils import 
shuffle\nfrom sklearn.utils._openmp_helpers import _openmp_effective_n_threads\n\nn_threads = _openmp_effective_n_threads()\n\n\nX_classification, y_classification = make_classification(random_state=0)\nX_regression, y_regression = make_regression(random_state=0)\nX_multi_classification, y_multi_classification = make_classification(\n n_classes=3, n_informative=3, random_state=0\n)\n\n\ndef _make_dumb_dataset(n_samples):\n \"\"\"Make a dumb dataset to test early stopping.\"\"\"\n rng = np.random.RandomState(42)\n X_dumb = rng.randn(n_samples, 1)\n y_dumb = (X_dumb[:, 0] > 0).astype(\"int64\")\n return X_dumb, y_dumb\n\n\[email protected](\n \"GradientBoosting, X, y\",\n [\n (HistGradientBoostingClassifier, X_classification, y_classification),\n (HistGradientBoostingRegressor, X_regression, y_regression),\n ],\n)\[email protected](\n \"params, err_msg\",\n [\n ({\"loss\": \"blah\"}, \"Loss blah is not supported for\"),\n ({\"learning_rate\": 0}, \"learning_rate=0 must be strictly positive\"),\n ({\"learning_rate\": -1}, \"learning_rate=-1 must be strictly positive\"),\n ({\"max_iter\": 0}, \"max_iter=0 must not be smaller than 1\"),\n ({\"max_leaf_nodes\": 0}, \"max_leaf_nodes=0 should not be smaller than 2\"),\n ({\"max_leaf_nodes\": 1}, \"max_leaf_nodes=1 should not be smaller than 2\"),\n ({\"max_depth\": 0}, \"max_depth=0 should not be smaller than 1\"),\n ({\"min_samples_leaf\": 0}, \"min_samples_leaf=0 should not be smaller\"),\n ({\"l2_regularization\": -1}, \"l2_regularization=-1 must be positive\"),\n ({\"max_bins\": 1}, \"max_bins=1 should be no smaller than 2 and no larger\"),\n ({\"max_bins\": 256}, \"max_bins=256 should be no smaller than 2 and no\"),\n ({\"n_iter_no_change\": -1}, \"n_iter_no_change=-1 must be positive\"),\n ({\"validation_fraction\": -1}, \"validation_fraction=-1 must be strictly\"),\n ({\"validation_fraction\": 0}, \"validation_fraction=0 must be strictly\"),\n ({\"tol\": -1}, \"tol=-1 must not be smaller than 0\"),\n ],\n)\ndef test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):\n\n with pytest.raises(ValueError, match=err_msg):\n GradientBoosting(**params).fit(X, y)\n\n\ndef test_invalid_classification_loss():\n binary_clf = HistGradientBoostingClassifier(loss=\"binary_crossentropy\")\n err_msg = (\n \"loss='binary_crossentropy' is not defined for multiclass \"\n \"classification with n_classes=3, use \"\n \"loss='categorical_crossentropy' instead\"\n )\n with pytest.raises(ValueError, match=err_msg):\n binary_clf.fit(np.zeros(shape=(3, 2)), np.arange(3))\n\n\[email protected](\n \"scoring, validation_fraction, early_stopping, n_iter_no_change, tol\",\n [\n (\"neg_mean_squared_error\", 0.1, True, 5, 1e-7), # use scorer\n (\"neg_mean_squared_error\", None, True, 5, 1e-1), # use scorer on train\n (None, 0.1, True, 5, 1e-7), # same with default scorer\n (None, None, True, 5, 1e-1),\n (\"loss\", 0.1, True, 5, 1e-7), # use loss\n (\"loss\", None, True, 5, 1e-1), # use loss on training data\n (None, None, False, 5, 0.0), # no early stopping\n ],\n)\ndef test_early_stopping_regression(\n scoring, validation_fraction, early_stopping, n_iter_no_change, tol\n):\n\n max_iter = 200\n\n X, y = make_regression(n_samples=50, random_state=0)\n\n gb = HistGradientBoostingRegressor(\n verbose=1, # just for coverage\n min_samples_leaf=5, # easier to overfit fast\n scoring=scoring,\n tol=tol,\n early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n max_iter=max_iter,\n n_iter_no_change=n_iter_no_change,\n random_state=0,\n )\n 
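    # With early_stopping=True, the score defined by `scoring` is tracked on a held-out
    # validation fraction (or on the training data when validation_fraction is None),
    # and fitting stops once n_iter_no_change consecutive iterations fail to improve
    # the tracked score by more than tol.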
gb.fit(X, y)\n\n if early_stopping:\n assert n_iter_no_change <= gb.n_iter_ < max_iter\n else:\n assert gb.n_iter_ == max_iter\n\n\[email protected](\n \"data\",\n (\n make_classification(n_samples=30, random_state=0),\n make_classification(\n n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0\n ),\n ),\n)\[email protected](\n \"scoring, validation_fraction, early_stopping, n_iter_no_change, tol\",\n [\n (\"accuracy\", 0.1, True, 5, 1e-7), # use scorer\n (\"accuracy\", None, True, 5, 1e-1), # use scorer on training data\n (None, 0.1, True, 5, 1e-7), # same with default scorer\n (None, None, True, 5, 1e-1),\n (\"loss\", 0.1, True, 5, 1e-7), # use loss\n (\"loss\", None, True, 5, 1e-1), # use loss on training data\n (None, None, False, 5, 0.0), # no early stopping\n ],\n)\ndef test_early_stopping_classification(\n data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol\n):\n\n max_iter = 50\n\n X, y = data\n\n gb = HistGradientBoostingClassifier(\n verbose=1, # just for coverage\n min_samples_leaf=5, # easier to overfit fast\n scoring=scoring,\n tol=tol,\n early_stopping=early_stopping,\n validation_fraction=validation_fraction,\n max_iter=max_iter,\n n_iter_no_change=n_iter_no_change,\n random_state=0,\n )\n gb.fit(X, y)\n\n if early_stopping is True:\n assert n_iter_no_change <= gb.n_iter_ < max_iter\n else:\n assert gb.n_iter_ == max_iter\n\n\[email protected](\n \"GradientBoosting, X, y\",\n [\n (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),\n (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),\n (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),\n (HistGradientBoostingRegressor, *_make_dumb_dataset(10001)),\n ],\n)\ndef test_early_stopping_default(GradientBoosting, X, y):\n # Test that early stopping is enabled by default if and only if there\n # are more than 10000 samples\n gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)\n gb.fit(X, y)\n if X.shape[0] > 10000:\n assert gb.n_iter_ < gb.max_iter\n else:\n assert gb.n_iter_ == gb.max_iter\n\n\[email protected](\n \"scores, n_iter_no_change, tol, stopping\",\n [\n ([], 1, 0.001, False), # not enough iterations\n ([1, 1, 1], 5, 0.001, False), # not enough iterations\n ([1, 1, 1, 1, 1], 5, 0.001, False), # not enough iterations\n ([1, 2, 3, 4, 5, 6], 5, 0.001, False), # significant improvement\n ([1, 2, 3, 4, 5, 6], 5, 0.0, False), # significant improvement\n ([1, 2, 3, 4, 5, 6], 5, 0.999, False), # significant improvement\n ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False), # significant improvement\n ([1] * 6, 5, 0.0, True), # no significant improvement\n ([1] * 6, 5, 0.001, True), # no significant improvement\n ([1] * 6, 5, 5, True), # no significant improvement\n ],\n)\ndef test_should_stop(scores, n_iter_no_change, tol, stopping):\n\n gbdt = HistGradientBoostingClassifier(n_iter_no_change=n_iter_no_change, tol=tol)\n assert gbdt._should_stop(scores) == stopping\n\n\ndef test_absolute_error():\n # For coverage only.\n X, y = make_regression(n_samples=500, random_state=0)\n gbdt = HistGradientBoostingRegressor(loss=\"absolute_error\", random_state=0)\n gbdt.fit(X, y)\n assert gbdt.score(X, y) > 0.9\n\n\ndef test_absolute_error_sample_weight():\n # non regression test for issue #19400\n # make sure no error is thrown during fit of\n # HistGradientBoostingRegressor with absolute_error loss function\n # and passing sample_weight\n rng = np.random.RandomState(0)\n n_samples = 100\n X = rng.uniform(-1, 1, size=(n_samples, 2))\n y = rng.uniform(-1, 1, size=n_samples)\n 
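# A rough standalone sketch (added for illustration; `should_stop` is a local helper,
# not the estimator's private method) of the stopping rule the parametrized cases above
# describe: stop only when none of the last n_iter_no_change scores beats the reference
# score from n_iter_no_change iterations earlier by more than tol (higher is better).
def should_stop(scores, n_iter_no_change, tol):
    if len(scores) < n_iter_no_change + 1:
        return False
    reference = scores[-(n_iter_no_change + 1)] + tol
    return not any(score > reference for score in scores[-n_iter_no_change:])

assert should_stop([1] * 6, n_iter_no_change=5, tol=0.001)
assert not should_stop([1, 2, 3, 4, 5, 6], n_iter_no_change=5, tol=0.001)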
sample_weight = rng.uniform(0, 1, size=n_samples)\n gbdt = HistGradientBoostingRegressor(loss=\"absolute_error\")\n gbdt.fit(X, y, sample_weight=sample_weight)\n\n\[email protected](\"y\", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])\ndef test_poisson_y_positive(y):\n # Test that ValueError is raised if either one y_i < 0 or sum(y_i) <= 0.\n err_msg = r\"loss='poisson' requires non-negative y and sum\\(y\\) > 0.\"\n gbdt = HistGradientBoostingRegressor(loss=\"poisson\", random_state=0)\n with pytest.raises(ValueError, match=err_msg):\n gbdt.fit(np.zeros(shape=(len(y), 1)), y)\n\n\ndef test_poisson():\n # For Poisson distributed target, Poisson loss should give better results\n # than least squares measured in Poisson deviance as metric.\n rng = np.random.RandomState(42)\n n_train, n_test, n_features = 500, 100, 100\n X = make_low_rank_matrix(\n n_samples=n_train + n_test, n_features=n_features, random_state=rng\n )\n # We create a log-linear Poisson model and downscale coef as it will get\n # exponentiated.\n coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)\n y = rng.poisson(lam=np.exp(X @ coef))\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=n_test, random_state=rng\n )\n gbdt_pois = HistGradientBoostingRegressor(loss=\"poisson\", random_state=rng)\n gbdt_ls = HistGradientBoostingRegressor(loss=\"squared_error\", random_state=rng)\n gbdt_pois.fit(X_train, y_train)\n gbdt_ls.fit(X_train, y_train)\n dummy = DummyRegressor(strategy=\"mean\").fit(X_train, y_train)\n\n for X, y in [(X_train, y_train), (X_test, y_test)]:\n metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))\n # squared_error might produce non-positive predictions => clip\n metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None))\n metric_dummy = mean_poisson_deviance(y, dummy.predict(X))\n assert metric_pois < metric_ls\n assert metric_pois < metric_dummy\n\n\ndef test_binning_train_validation_are_separated():\n # Make sure training and validation data are binned separately.\n # See issue 13926\n\n rng = np.random.RandomState(0)\n validation_fraction = 0.2\n gb = HistGradientBoostingClassifier(\n early_stopping=True, validation_fraction=validation_fraction, random_state=rng\n )\n gb.fit(X_classification, y_classification)\n mapper_training_data = gb._bin_mapper\n\n # Note that since the data is small there is no subsampling and the\n # random_state doesn't matter\n mapper_whole_data = _BinMapper(random_state=0)\n mapper_whole_data.fit(X_classification)\n\n n_samples = X_classification.shape[0]\n assert np.all(\n mapper_training_data.n_bins_non_missing_\n == int((1 - validation_fraction) * n_samples)\n )\n assert np.all(\n mapper_training_data.n_bins_non_missing_\n != mapper_whole_data.n_bins_non_missing_\n )\n\n\ndef test_missing_values_trivial():\n # sanity check for missing values support. 
With only one feature and\n # y == isnan(X), the gbdt is supposed to reach perfect accuracy on the\n # training set.\n\n n_samples = 100\n n_features = 1\n rng = np.random.RandomState(0)\n\n X = rng.normal(size=(n_samples, n_features))\n mask = rng.binomial(1, 0.5, size=X.shape).astype(bool)\n X[mask] = np.nan\n y = mask.ravel()\n gb = HistGradientBoostingClassifier()\n gb.fit(X, y)\n\n assert gb.score(X, y) == pytest.approx(1)\n\n\[email protected](\"problem\", (\"classification\", \"regression\"))\[email protected](\n \"missing_proportion, expected_min_score_classification, \"\n \"expected_min_score_regression\",\n [(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)],\n)\ndef test_missing_values_resilience(\n problem,\n missing_proportion,\n expected_min_score_classification,\n expected_min_score_regression,\n):\n # Make sure the estimators can deal with missing values and still yield\n # decent predictions\n\n rng = np.random.RandomState(0)\n n_samples = 1000\n n_features = 2\n if problem == \"regression\":\n X, y = make_regression(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_features,\n random_state=rng,\n )\n gb = HistGradientBoostingRegressor()\n expected_min_score = expected_min_score_regression\n else:\n X, y = make_classification(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_features,\n n_redundant=0,\n n_repeated=0,\n random_state=rng,\n )\n gb = HistGradientBoostingClassifier()\n expected_min_score = expected_min_score_classification\n\n mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)\n X[mask] = np.nan\n\n gb.fit(X, y)\n\n assert gb.score(X, y) > expected_min_score\n\n\[email protected](\n \"data\",\n [\n make_classification(random_state=0, n_classes=2),\n make_classification(random_state=0, n_classes=3, n_informative=3),\n ],\n ids=[\"binary_crossentropy\", \"categorical_crossentropy\"],\n)\ndef test_zero_division_hessians(data):\n # non regression test for issue #14018\n # make sure we avoid zero division errors when computing the leaves values.\n\n # If the learning rate is too high, the raw predictions are bad and will\n # saturate the softmax (or sigmoid in binary classif). 
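# A minimal usage sketch (added for illustration; the toy arrays are assumptions): the
# histogram-based estimators exercised above accept np.nan in the input natively, so
# no imputation step is required before calling fit.
import numpy as np
from sklearn.ensemble import HistGradientBoostingRegressor

X_nan = np.array([[1.0], [2.0], [np.nan], [4.0]])
y_toy = np.array([1.0, 2.0, 3.0, 4.0])
HistGradientBoostingRegressor(max_iter=10, min_samples_leaf=1).fit(X_nan, y_toy)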
This leads to\n # probabilities being exactly 0 or 1, gradients being constant, and\n # hessians being zero.\n X, y = data\n gb = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)\n gb.fit(X, y)\n\n\ndef test_small_trainset():\n # Make sure that the small trainset is stratified and has the expected\n # length (10k samples)\n n_samples = 20000\n original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}\n rng = np.random.RandomState(42)\n X = rng.randn(n_samples).reshape(n_samples, 1)\n y = [\n [class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items()\n ]\n y = shuffle(np.concatenate(y))\n gb = HistGradientBoostingClassifier()\n\n # Compute the small training set\n X_small, y_small, _ = gb._get_small_trainset(\n X, y, seed=42, sample_weight_train=None\n )\n\n # Compute the class distribution in the small training set\n unique, counts = np.unique(y_small, return_counts=True)\n small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)}\n\n # Test that the small training set has the expected length\n assert X_small.shape[0] == 10000\n assert y_small.shape[0] == 10000\n\n # Test that the class distributions in the whole dataset and in the small\n # training set are identical\n assert small_distrib == pytest.approx(original_distrib)\n\n\ndef test_missing_values_minmax_imputation():\n # Compare the buit-in missing value handling of Histogram GBC with an\n # a-priori missing value imputation strategy that should yield the same\n # results in terms of decision function.\n #\n # Each feature (containing NaNs) is replaced by 2 features:\n # - one where the nans are replaced by min(feature) - 1\n # - one where the nans are replaced by max(feature) + 1\n # A split where nans go to the left has an equivalent split in the\n # first (min) feature, and a split where nans go to the right has an\n # equivalent split in the second (max) feature.\n #\n # Assuming the data is such that there is never a tie to select the best\n # feature to split on during training, the learned decision trees should be\n # strictly equivalent (learn a sequence of splits that encode the same\n # decision function).\n #\n # The MinMaxImputer transformer is meant to be a toy implementation of the\n # \"Missing In Attributes\" (MIA) missing value handling for decision trees\n # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305\n # The implementation of MIA as an imputation transformer was suggested by\n # \"Remark 3\" in https://arxiv.org/abs/1902.06931\n\n class MinMaxImputer(TransformerMixin, BaseEstimator):\n def fit(self, X, y=None):\n mm = MinMaxScaler().fit(X)\n self.data_min_ = mm.data_min_\n self.data_max_ = mm.data_max_\n return self\n\n def transform(self, X):\n X_min, X_max = X.copy(), X.copy()\n\n for feature_idx in range(X.shape[1]):\n nan_mask = np.isnan(X[:, feature_idx])\n X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1\n X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1\n\n return np.concatenate([X_min, X_max], axis=1)\n\n def make_missing_value_data(n_samples=int(1e4), seed=0):\n rng = np.random.RandomState(seed)\n X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng)\n\n # Pre-bin the data to ensure a deterministic handling by the 2\n # strategies and also make it easier to insert np.nan in a structured\n # way:\n X = KBinsDiscretizer(n_bins=42, encode=\"ordinal\").fit_transform(X)\n\n # First feature has missing values completely at random:\n rnd_mask = rng.rand(X.shape[0]) > 0.9\n 
X[rnd_mask, 0] = np.nan\n\n # Second and third features have missing values for extreme values\n # (censoring missingness):\n low_mask = X[:, 1] == 0\n X[low_mask, 1] = np.nan\n\n high_mask = X[:, 2] == X[:, 2].max()\n X[high_mask, 2] = np.nan\n\n # Make the last feature nan pattern very informative:\n y_max = np.percentile(y, 70)\n y_max_mask = y >= y_max\n y[y_max_mask] = y_max\n X[y_max_mask, 3] = np.nan\n\n # Check that there is at least one missing value in each feature:\n for feature_idx in range(X.shape[1]):\n assert any(np.isnan(X[:, feature_idx]))\n\n # Let's use a test set to check that the learned decision function is\n # the same as evaluated on unseen data. Otherwise it could just be the\n # case that we find two independent ways to overfit the training set.\n return train_test_split(X, y, random_state=rng)\n\n # n_samples need to be large enough to minimize the likelihood of having\n # several candidate splits with the same gain value in a given tree.\n X_train, X_test, y_train, y_test = make_missing_value_data(\n n_samples=int(1e4), seed=0\n )\n\n # Use a small number of leaf nodes and iterations so as to keep\n # under-fitting models to minimize the likelihood of ties when training the\n # model.\n gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)\n gbm1.fit(X_train, y_train)\n\n gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))\n gbm2.fit(X_train, y_train)\n\n # Check that the model reach the same score:\n assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))\n\n assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))\n\n # Check the individual prediction match as a finer grained\n # decision function check.\n assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))\n assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))\n\n\ndef test_infinite_values():\n # Basic test for infinite values\n\n X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)\n y = np.array([0, 0, 1, 1])\n\n gbdt = HistGradientBoostingRegressor(min_samples_leaf=1)\n gbdt.fit(X, y)\n np.testing.assert_allclose(gbdt.predict(X), y, atol=1e-4)\n\n\ndef test_consistent_lengths():\n X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)\n y = np.array([0, 0, 1, 1])\n sample_weight = np.array([0.1, 0.3, 0.1])\n gbdt = HistGradientBoostingRegressor()\n with pytest.raises(ValueError, match=r\"sample_weight.shape == \\(3,\\), expected\"):\n gbdt.fit(X, y, sample_weight)\n\n with pytest.raises(\n ValueError, match=\"Found input variables with inconsistent number\"\n ):\n gbdt.fit(X, y[1:])\n\n\ndef test_infinite_values_missing_values():\n # High level test making sure that inf and nan values are properly handled\n # when both are present. This is similar to\n # test_split_on_nan_with_infinite_values() in test_grower.py, though we\n # cannot check the predictions for binned values here.\n\n X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)\n y_isnan = np.isnan(X.ravel())\n y_isinf = X.ravel() == np.inf\n\n stump_clf = HistGradientBoostingClassifier(\n min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2\n )\n\n assert stump_clf.fit(X, y_isinf).score(X, y_isinf) == 1\n assert stump_clf.fit(X, y_isnan).score(X, y_isnan) == 1\n\n\ndef test_crossentropy_binary_problem():\n # categorical_crossentropy should only be used if there are more than two\n # classes present. 
PR #14869\n X = [[1], [0]]\n y = [0, 1]\n gbrt = HistGradientBoostingClassifier(loss=\"categorical_crossentropy\")\n with pytest.raises(\n ValueError, match=\"'categorical_crossentropy' is not suitable for\"\n ):\n gbrt.fit(X, y)\n\n\[email protected](\"scoring\", [None, \"loss\"])\ndef test_string_target_early_stopping(scoring):\n # Regression tests for #14709 where the targets need to be encoded before\n # to compute the score\n rng = np.random.RandomState(42)\n X = rng.randn(100, 10)\n y = np.array([\"x\"] * 50 + [\"y\"] * 50, dtype=object)\n gbrt = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)\n gbrt.fit(X, y)\n\n\ndef test_zero_sample_weights_regression():\n # Make sure setting a SW to zero amounts to ignoring the corresponding\n # sample\n\n X = [[1, 0], [1, 0], [1, 0], [0, 1]]\n y = [0, 0, 1, 0]\n # ignore the first 2 training samples by setting their weight to 0\n sample_weight = [0, 0, 1, 1]\n gb = HistGradientBoostingRegressor(min_samples_leaf=1)\n gb.fit(X, y, sample_weight=sample_weight)\n assert gb.predict([[1, 0]])[0] > 0.5\n\n\ndef test_zero_sample_weights_classification():\n # Make sure setting a SW to zero amounts to ignoring the corresponding\n # sample\n\n X = [[1, 0], [1, 0], [1, 0], [0, 1]]\n y = [0, 0, 1, 0]\n # ignore the first 2 training samples by setting their weight to 0\n sample_weight = [0, 0, 1, 1]\n gb = HistGradientBoostingClassifier(loss=\"binary_crossentropy\", min_samples_leaf=1)\n gb.fit(X, y, sample_weight=sample_weight)\n assert_array_equal(gb.predict([[1, 0]]), [1])\n\n X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]]\n y = [0, 0, 1, 0, 2]\n # ignore the first 2 training samples by setting their weight to 0\n sample_weight = [0, 0, 1, 1, 1]\n gb = HistGradientBoostingClassifier(\n loss=\"categorical_crossentropy\", min_samples_leaf=1\n )\n gb.fit(X, y, sample_weight=sample_weight)\n assert_array_equal(gb.predict([[1, 0]]), [1])\n\n\[email protected](\n \"problem\", (\"regression\", \"binary_classification\", \"multiclass_classification\")\n)\[email protected](\"duplication\", (\"half\", \"all\"))\ndef test_sample_weight_effect(problem, duplication):\n # High level test to make sure that duplicating a sample is equivalent to\n # giving it weight of 2.\n\n # fails for n_samples > 255 because binning does not take sample weights\n # into account. 
Keeping n_samples <= 255 makes\n # sure only unique values are used so SW have no effect on binning.\n n_samples = 255\n n_features = 2\n if problem == \"regression\":\n X, y = make_regression(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_features,\n random_state=0,\n )\n Klass = HistGradientBoostingRegressor\n else:\n n_classes = 2 if problem == \"binary_classification\" else 3\n X, y = make_classification(\n n_samples=n_samples,\n n_features=n_features,\n n_informative=n_features,\n n_redundant=0,\n n_clusters_per_class=1,\n n_classes=n_classes,\n random_state=0,\n )\n Klass = HistGradientBoostingClassifier\n\n # This test can't pass if min_samples_leaf > 1 because that would force 2\n # samples to be in the same node in est_sw, while these samples would be\n # free to be separate in est_dup: est_dup would just group together the\n # duplicated samples.\n est = Klass(min_samples_leaf=1)\n\n # Create dataset with duplicate and corresponding sample weights\n if duplication == \"half\":\n lim = n_samples // 2\n else:\n lim = n_samples\n X_dup = np.r_[X, X[:lim]]\n y_dup = np.r_[y, y[:lim]]\n sample_weight = np.ones(shape=(n_samples))\n sample_weight[:lim] = 2\n\n est_sw = clone(est).fit(X, y, sample_weight=sample_weight)\n est_dup = clone(est).fit(X_dup, y_dup)\n\n # checking raw_predict is stricter than just predict for classification\n assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup))\n\n\[email protected](\"loss_name\", (\"squared_error\", \"absolute_error\"))\ndef test_sum_hessians_are_sample_weight(loss_name):\n # For losses with constant hessians, the sum_hessians field of the\n # histograms must be equal to the sum of the sample weight of samples at\n # the corresponding bin.\n\n rng = np.random.RandomState(0)\n n_samples = 1000\n n_features = 2\n X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng)\n bin_mapper = _BinMapper()\n X_binned = bin_mapper.fit_transform(X)\n\n sample_weight = rng.normal(size=n_samples)\n\n loss = _LOSSES[loss_name](sample_weight=sample_weight, n_threads=n_threads)\n gradients, hessians = loss.init_gradients_and_hessians(\n n_samples=n_samples, prediction_dim=1, sample_weight=sample_weight\n )\n raw_predictions = rng.normal(size=(1, n_samples))\n loss.update_gradients_and_hessians(\n gradients, hessians, y, raw_predictions, sample_weight\n )\n\n # build sum_sample_weight which contains the sum of the sample weights at\n # each bin (for each feature). 
This must be equal to the sum_hessians\n # field of the corresponding histogram\n sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))\n for feature_idx in range(n_features):\n for sample_idx in range(n_samples):\n sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[\n sample_idx\n ]\n\n # Build histogram\n grower = TreeGrower(X_binned, gradients[0], hessians[0], n_bins=bin_mapper.n_bins)\n histograms = grower.histogram_builder.compute_histograms_brute(\n grower.root.sample_indices\n )\n\n for feature_idx in range(n_features):\n for bin_idx in range(bin_mapper.n_bins):\n assert histograms[feature_idx, bin_idx][\"sum_hessians\"] == (\n pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5)\n )\n\n\ndef test_max_depth_max_leaf_nodes():\n # Non regression test for\n # https://github.com/scikit-learn/scikit-learn/issues/16179\n # there was a bug when the max_depth and the max_leaf_nodes criteria were\n # met at the same time, which would lead to max_leaf_nodes not being\n # respected.\n X, y = make_classification(random_state=0)\n est = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1).fit(\n X, y\n )\n tree = est._predictors[0][0]\n assert tree.get_max_depth() == 2\n assert tree.get_n_leaf_nodes() == 3 # would be 4 prior to bug fix\n\n\ndef test_early_stopping_on_test_set_with_warm_start():\n # Non regression test for #16661 where second fit fails with\n # warm_start=True, early_stopping is on, and no validation set\n X, y = make_classification(random_state=0)\n gb = HistGradientBoostingClassifier(\n max_iter=1,\n scoring=\"loss\",\n warm_start=True,\n early_stopping=True,\n n_iter_no_change=1,\n validation_fraction=None,\n )\n\n gb.fit(X, y)\n # does not raise on second call\n gb.set_params(max_iter=2)\n gb.fit(X, y)\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\ndef test_single_node_trees(Est):\n # Make sure it's still possible to build single-node trees. In that case\n # the value of the root is set to 0. 
That's a correct value: if the tree is\n # single-node that's because min_gain_to_split is not respected right from\n # the root, so we don't want the tree to have any impact on the\n # predictions.\n\n X, y = make_classification(random_state=0)\n y[:] = 1 # constant target will lead to a single root node\n\n est = Est(max_iter=20)\n est.fit(X, y)\n\n assert all(len(predictor[0].nodes) == 1 for predictor in est._predictors)\n assert all(predictor[0].nodes[0][\"value\"] == 0 for predictor in est._predictors)\n # Still gives correct predictions thanks to the baseline prediction\n assert_allclose(est.predict(X), y)\n\n\[email protected](\n \"Est, loss, X, y\",\n [\n (\n HistGradientBoostingClassifier,\n BinaryCrossEntropy(sample_weight=None),\n X_classification,\n y_classification,\n ),\n (\n HistGradientBoostingRegressor,\n LeastSquares(sample_weight=None),\n X_regression,\n y_regression,\n ),\n ],\n)\ndef test_custom_loss(Est, loss, X, y):\n est = Est(loss=loss, max_iter=20)\n est.fit(X, y)\n\n\[email protected](\n \"HistGradientBoosting, X, y\",\n [\n (HistGradientBoostingClassifier, X_classification, y_classification),\n (HistGradientBoostingRegressor, X_regression, y_regression),\n (\n HistGradientBoostingClassifier,\n X_multi_classification,\n y_multi_classification,\n ),\n ],\n)\ndef test_staged_predict(HistGradientBoosting, X, y):\n\n # Test whether staged predictor eventually gives\n # the same prediction.\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=0\n )\n gb = HistGradientBoosting(max_iter=10)\n\n # test raise NotFittedError if not fitted\n with pytest.raises(NotFittedError):\n next(gb.staged_predict(X_test))\n\n gb.fit(X_train, y_train)\n\n # test if the staged predictions of each iteration\n # are equal to the corresponding predictions of the same estimator\n # trained from scratch.\n # this also test limit case when max_iter = 1\n method_names = (\n [\"predict\"]\n if is_regressor(gb)\n else [\"predict\", \"predict_proba\", \"decision_function\"]\n )\n for method_name in method_names:\n\n staged_method = getattr(gb, \"staged_\" + method_name)\n staged_predictions = list(staged_method(X_test))\n assert len(staged_predictions) == gb.n_iter_\n for n_iter, staged_predictions in enumerate(staged_method(X_test), 1):\n aux = HistGradientBoosting(max_iter=n_iter)\n aux.fit(X_train, y_train)\n pred_aux = getattr(aux, method_name)(X_test)\n\n assert_allclose(staged_predictions, pred_aux)\n assert staged_predictions.shape == pred_aux.shape\n\n\[email protected](\"insert_missing\", [False, True])\[email protected](\n \"Est\", (HistGradientBoostingRegressor, HistGradientBoostingClassifier)\n)\[email protected](\"bool_categorical_parameter\", [True, False])\ndef test_unknown_categories_nan(insert_missing, Est, bool_categorical_parameter):\n # Make sure no error is raised at predict if a category wasn't seen during\n # fit. 
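# --- Hedged illustration (not one of the tests; variable names are mine): declaring a
# column as categorical via integer indices, which the surrounding tests exercise with
# both index lists and boolean masks.
import numpy as np
from sklearn.ensemble import HistGradientBoostingClassifier

rng_demo = np.random.RandomState(0)
X_demo = np.c_[rng_demo.rand(200), rng_demo.randint(4, size=200)]
y_demo = (X_demo[:, 1] % 2 == 0).astype(int)
clf_demo = HistGradientBoostingClassifier(categorical_features=[1]).fit(X_demo, y_demo)
print(clf_demo.is_categorical_)  # [False  True]
print(clf_demo.score(X_demo, y_demo))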
We also make sure they're treated as nans.\n\n rng = np.random.RandomState(0)\n n_samples = 1000\n f1 = rng.rand(n_samples)\n f2 = rng.randint(4, size=n_samples)\n X = np.c_[f1, f2]\n y = np.zeros(shape=n_samples)\n y[X[:, 1] % 2 == 0] = 1\n\n if bool_categorical_parameter:\n categorical_features = [False, True]\n else:\n categorical_features = [1]\n\n if insert_missing:\n mask = rng.binomial(1, 0.01, size=X.shape).astype(bool)\n assert mask.sum() > 0\n X[mask] = np.nan\n\n est = Est(max_iter=20, categorical_features=categorical_features).fit(X, y)\n assert_array_equal(est.is_categorical_, [False, True])\n\n # Make sure no error is raised on unknown categories and nans\n # unknown categories will be treated as nans\n X_test = np.zeros((10, X.shape[1]), dtype=float)\n X_test[:5, 1] = 30\n X_test[5:, 1] = np.nan\n assert len(np.unique(est.predict(X_test))) == 1\n\n\ndef test_categorical_encoding_strategies():\n # Check native categorical handling vs different encoding strategies. We\n # make sure that native encoding needs only 1 split to achieve a perfect\n # prediction on a simple dataset. In contrast, OneHotEncoded data needs\n # more depth / splits, and treating categories as ordered (just using\n # OrdinalEncoder) requires even more depth.\n\n # dataset with one random continuous feature, and one categorical feature\n # with values in [0, 5], e.g. from an OrdinalEncoder.\n # class == 1 iff categorical value in {0, 2, 4}\n rng = np.random.RandomState(0)\n n_samples = 10_000\n f1 = rng.rand(n_samples)\n f2 = rng.randint(6, size=n_samples)\n X = np.c_[f1, f2]\n y = np.zeros(shape=n_samples)\n y[X[:, 1] % 2 == 0] = 1\n\n # make sure dataset is balanced so that the baseline_prediction doesn't\n # influence predictions too much with max_iter = 1\n assert 0.49 < y.mean() < 0.51\n\n clf_cat = HistGradientBoostingClassifier(\n max_iter=1, max_depth=1, categorical_features=[False, True]\n )\n\n # Using native categorical encoding, we get perfect predictions with just\n # one split\n assert cross_val_score(clf_cat, X, y).mean() == 1\n\n # quick sanity check for the bitset: 0, 2, 4 = 2**0 + 2**2 + 2**4 = 21\n expected_left_bitset = [21, 0, 0, 0, 0, 0, 0, 0]\n left_bitset = clf_cat.fit(X, y)._predictors[0][0].raw_left_cat_bitsets[0]\n assert_array_equal(left_bitset, expected_left_bitset)\n\n # Treating categories as ordered, we need more depth / more splits to get\n # the same predictions\n clf_no_cat = HistGradientBoostingClassifier(\n max_iter=1, max_depth=4, categorical_features=None\n )\n assert cross_val_score(clf_no_cat, X, y).mean() < 0.9\n\n clf_no_cat.set_params(max_depth=5)\n assert cross_val_score(clf_no_cat, X, y).mean() == 1\n\n # Using OHEd data, we need less splits than with pure OEd data, but we\n # still need more splits than with the native categorical splits\n ct = make_column_transformer(\n (OneHotEncoder(sparse=False), [1]), remainder=\"passthrough\"\n )\n X_ohe = ct.fit_transform(X)\n clf_no_cat.set_params(max_depth=2)\n assert cross_val_score(clf_no_cat, X_ohe, y).mean() < 0.9\n\n clf_no_cat.set_params(max_depth=3)\n assert cross_val_score(clf_no_cat, X_ohe, y).mean() == 1\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\[email protected](\n \"categorical_features, monotonic_cst, expected_msg\",\n [\n (\n [\"hello\", \"world\"],\n None,\n \"categorical_features must be an array-like of bools or array-like of \"\n \"ints.\",\n ),\n (\n [0, -1],\n None,\n (\n r\"categorical_features set as integer indices must be in \"\n 
r\"\\[0, n_features - 1\\]\"\n ),\n ),\n (\n [True, True, False, False, True],\n None,\n r\"categorical_features set as a boolean mask must have shape \"\n r\"\\(n_features,\\)\",\n ),\n (\n [True, True, False, False],\n [0, -1, 0, 1],\n \"Categorical features cannot have monotonic constraints\",\n ),\n ],\n)\ndef test_categorical_spec_errors(\n Est, categorical_features, monotonic_cst, expected_msg\n):\n # Test errors when categories are specified incorrectly\n n_samples = 100\n X, y = make_classification(random_state=0, n_features=4, n_samples=n_samples)\n rng = np.random.RandomState(0)\n X[:, 0] = rng.randint(0, 10, size=n_samples)\n X[:, 1] = rng.randint(0, 10, size=n_samples)\n est = Est(categorical_features=categorical_features, monotonic_cst=monotonic_cst)\n\n with pytest.raises(ValueError, match=expected_msg):\n est.fit(X, y)\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\[email protected](\"categorical_features\", ([False, False], []))\[email protected](\"as_array\", (True, False))\ndef test_categorical_spec_no_categories(Est, categorical_features, as_array):\n # Make sure we can properly detect that no categorical features are present\n # even if the categorical_features parameter is not None\n X = np.arange(10).reshape(5, 2)\n y = np.arange(5)\n if as_array:\n categorical_features = np.asarray(categorical_features)\n est = Est(categorical_features=categorical_features).fit(X, y)\n assert est.is_categorical_ is None\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\ndef test_categorical_bad_encoding_errors(Est):\n # Test errors when categories are encoded incorrectly\n\n gb = Est(categorical_features=[True], max_bins=2)\n\n X = np.array([[0, 1, 2]]).T\n y = np.arange(3)\n msg = \"Categorical feature at index 0 is expected to have a cardinality <= 2\"\n with pytest.raises(ValueError, match=msg):\n gb.fit(X, y)\n\n X = np.array([[0, 2]]).T\n y = np.arange(2)\n msg = \"Categorical feature at index 0 is expected to be encoded with values < 2\"\n with pytest.raises(ValueError, match=msg):\n gb.fit(X, y)\n\n # nans are ignored in the counts\n X = np.array([[0, 1, np.nan]]).T\n y = np.arange(3)\n gb.fit(X, y)\n\n\[email protected](\n \"Est\", (HistGradientBoostingClassifier, HistGradientBoostingRegressor)\n)\ndef test_uint8_predict(Est):\n # Non regression test for\n # https://github.com/scikit-learn/scikit-learn/issues/18408\n # Make sure X can be of dtype uint8 (i.e. X_BINNED_DTYPE) in predict. 
It\n # will be converted to X_DTYPE.\n\n rng = np.random.RandomState(0)\n\n X = rng.randint(0, 100, size=(10, 2)).astype(np.uint8)\n y = rng.randint(0, 2, size=10).astype(np.uint8)\n est = Est()\n est.fit(X, y)\n est.predict(X)\n\n\n# TODO: Remove in v1.2\[email protected](\n \"old_loss, new_loss\",\n [\n (\"least_squares\", \"squared_error\"),\n (\"least_absolute_deviation\", \"absolute_error\"),\n ],\n)\ndef test_loss_deprecated(old_loss, new_loss):\n X, y = make_regression(n_samples=50, random_state=0)\n est1 = HistGradientBoostingRegressor(loss=old_loss, random_state=0)\n\n with pytest.warns(FutureWarning, match=f\"The loss '{old_loss}' was deprecated\"):\n est1.fit(X, y)\n\n est2 = HistGradientBoostingRegressor(loss=new_loss, random_state=0)\n est2.fit(X, y)\n assert_allclose(est1.predict(X), est2.predict(X))\n", "# -*- coding: utf8\n\"\"\"Random Projection transformers.\n\nRandom Projections are a simple and computationally efficient way to\nreduce the dimensionality of the data by trading a controlled amount\nof accuracy (as additional variance) for faster processing times and\nsmaller model sizes.\n\nThe dimensions and distribution of Random Projections matrices are\ncontrolled so as to preserve the pairwise distances between any two\nsamples of the dataset.\n\nThe main theoretical result behind the efficiency of random projection is the\n`Johnson-Lindenstrauss lemma (quoting Wikipedia)\n<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:\n\n In mathematics, the Johnson-Lindenstrauss lemma is a result\n concerning low-distortion embeddings of points from high-dimensional\n into low-dimensional Euclidean space. The lemma states that a small set\n of points in a high-dimensional space can be embedded into a space of\n much lower dimension in such a way that distances between the points are\n nearly preserved. The map used for the embedding is at least Lipschitz,\n and can even be taken to be an orthogonal projection.\n\n\"\"\"\n# Authors: Olivier Grisel <[email protected]>,\n# Arnaud Joly <[email protected]>\n# License: BSD 3 clause\n\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom .base import BaseEstimator, TransformerMixin\nfrom .base import _ClassNamePrefixFeaturesOutMixin\n\nfrom .utils import check_random_state\nfrom .utils.extmath import safe_sparse_dot\nfrom .utils.random import sample_without_replacement\nfrom .utils.validation import check_is_fitted\nfrom .exceptions import DataDimensionalityWarning\n\n\n__all__ = [\n \"SparseRandomProjection\",\n \"GaussianRandomProjection\",\n \"johnson_lindenstrauss_min_dim\",\n]\n\n\ndef johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1):\n \"\"\"Find a 'safe' number of components to randomly project to.\n\n The distortion introduced by a random projection `p` only changes the\n distance between two points by a factor (1 +- eps) in an euclidean space\n with good probability. 
The projection `p` is an eps-embedding as defined\n by:\n\n (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2\n\n Where u and v are any rows taken from a dataset of shape (n_samples,\n n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian\n N(0, 1) matrix of shape (n_components, n_features) (or a sparse\n Achlioptas matrix).\n\n The minimum number of components to guarantee the eps-embedding is\n given by:\n\n n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)\n\n Note that the number of dimensions is independent of the original\n number of features but instead depends on the size of the dataset:\n the larger the dataset, the higher is the minimal dimensionality of\n an eps-embedding.\n\n Read more in the :ref:`User Guide <johnson_lindenstrauss>`.\n\n Parameters\n ----------\n n_samples : int or array-like of int\n Number of samples that should be a integer greater than 0. If an array\n is given, it will compute a safe number of components array-wise.\n\n eps : float or ndarray of shape (n_components,), dtype=float, \\\n default=0.1\n Maximum distortion rate in the range (0,1 ) as defined by the\n Johnson-Lindenstrauss lemma. If an array is given, it will compute a\n safe number of components array-wise.\n\n Returns\n -------\n n_components : int or ndarray of int\n The minimal number of components to guarantee with good probability\n an eps-embedding with n_samples.\n\n Examples\n --------\n >>> from sklearn.random_projection import johnson_lindenstrauss_min_dim\n >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)\n 663\n\n >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])\n array([ 663, 11841, 1112658])\n\n >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)\n array([ 7894, 9868, 11841])\n\n References\n ----------\n\n .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma\n\n .. 
[2] Sanjoy Dasgupta and Anupam Gupta, 1999,\n \"An elementary proof of the Johnson-Lindenstrauss Lemma.\"\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654\n\n \"\"\"\n eps = np.asarray(eps)\n n_samples = np.asarray(n_samples)\n\n if np.any(eps <= 0.0) or np.any(eps >= 1):\n raise ValueError(\"The JL bound is defined for eps in ]0, 1[, got %r\" % eps)\n\n if np.any(n_samples) <= 0:\n raise ValueError(\n \"The JL bound is defined for n_samples greater than zero, got %r\"\n % n_samples\n )\n\n denominator = (eps ** 2 / 2) - (eps ** 3 / 3)\n return (4 * np.log(n_samples) / denominator).astype(np.int64)\n\n\ndef _check_density(density, n_features):\n \"\"\"Factorize density check according to Li et al.\"\"\"\n if density == \"auto\":\n density = 1 / np.sqrt(n_features)\n\n elif density <= 0 or density > 1:\n raise ValueError(\"Expected density in range ]0, 1], got: %r\" % density)\n return density\n\n\ndef _check_input_size(n_components, n_features):\n \"\"\"Factorize argument checking for random matrix generation.\"\"\"\n if n_components <= 0:\n raise ValueError(\n \"n_components must be strictly positive, got %d\" % n_components\n )\n if n_features <= 0:\n raise ValueError(\"n_features must be strictly positive, got %d\" % n_features)\n\n\ndef _gaussian_random_matrix(n_components, n_features, random_state=None):\n \"\"\"Generate a dense Gaussian random matrix.\n\n The components of the random matrix are drawn from\n\n N(0, 1.0 / n_components).\n\n Read more in the :ref:`User Guide <gaussian_random_matrix>`.\n\n Parameters\n ----------\n n_components : int,\n Dimensionality of the target projection space.\n\n n_features : int,\n Dimensionality of the original source space.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo random number generator used to generate the matrix\n at fit time.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n components : ndarray of shape (n_components, n_features)\n The generated Gaussian random matrix.\n\n See Also\n --------\n GaussianRandomProjection\n \"\"\"\n _check_input_size(n_components, n_features)\n rng = check_random_state(random_state)\n components = rng.normal(\n loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features)\n )\n return components\n\n\ndef _sparse_random_matrix(n_components, n_features, density=\"auto\", random_state=None):\n \"\"\"Generalized Achlioptas random sparse matrix for random projection.\n\n Setting density to 1 / 3 will yield the original matrix by Dimitris\n Achlioptas while setting a lower value will yield the generalization\n by Ping Li et al.\n\n If we note :math:`s = 1 / density`, the components of the random matrix are\n drawn from:\n\n - -sqrt(s) / sqrt(n_components) with probability 1 / 2s\n - 0 with probability 1 - 1 / s\n - +sqrt(s) / sqrt(n_components) with probability 1 / 2s\n\n Read more in the :ref:`User Guide <sparse_random_matrix>`.\n\n Parameters\n ----------\n n_components : int,\n Dimensionality of the target projection space.\n\n n_features : int,\n Dimensionality of the original source space.\n\n density : float or 'auto', default='auto'\n Ratio of non-zero component in the random projection matrix in the\n range `(0, 1]`\n\n If density = 'auto', the value is set to the minimum density\n as recommended by Ping Li et al.: 1 / sqrt(n_features).\n\n Use density = 1 / 3.0 if you want to reproduce the results from\n Achlioptas, 2001.\n\n random_state : int, 
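# --- Hedged sketch (usage from outside this module): the closed-form bound implemented
# above can be reproduced by hand; for n_samples=1e6 and eps=0.5 it matches the value
# quoted in the docstring example.
import numpy as np
from sklearn.random_projection import johnson_lindenstrauss_min_dim

n_samples_demo, eps_demo = 1e6, 0.5
by_hand = int(4 * np.log(n_samples_demo) / (eps_demo ** 2 / 2 - eps_demo ** 3 / 3))
assert by_hand == int(johnson_lindenstrauss_min_dim(n_samples_demo, eps=eps_demo)) == 663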
RandomState instance or None, default=None\n Controls the pseudo random number generator used to generate the matrix\n at fit time.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n components : {ndarray, sparse matrix} of shape (n_components, n_features)\n The generated Gaussian random matrix. Sparse matrix will be of CSR\n format.\n\n See Also\n --------\n SparseRandomProjection\n\n References\n ----------\n\n .. [1] Ping Li, T. Hastie and K. W. Church, 2006,\n \"Very Sparse Random Projections\".\n https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf\n\n .. [2] D. Achlioptas, 2001, \"Database-friendly random projections\",\n http://www.cs.ucsc.edu/~optas/papers/jl.pdf\n\n \"\"\"\n _check_input_size(n_components, n_features)\n density = _check_density(density, n_features)\n rng = check_random_state(random_state)\n\n if density == 1:\n # skip index generation if totally dense\n components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1\n return 1 / np.sqrt(n_components) * components\n\n else:\n # Generate location of non zero elements\n indices = []\n offset = 0\n indptr = [offset]\n for _ in range(n_components):\n # find the indices of the non-zero components for row i\n n_nonzero_i = rng.binomial(n_features, density)\n indices_i = sample_without_replacement(\n n_features, n_nonzero_i, random_state=rng\n )\n indices.append(indices_i)\n offset += n_nonzero_i\n indptr.append(offset)\n\n indices = np.concatenate(indices)\n\n # Among non zero components the probability of the sign is 50%/50%\n data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1\n\n # build the CSR structure by concatenating the rows\n components = sp.csr_matrix(\n (data, indices, indptr), shape=(n_components, n_features)\n )\n\n return np.sqrt(1 / density) / np.sqrt(n_components) * components\n\n\nclass BaseRandomProjection(\n TransformerMixin, BaseEstimator, _ClassNamePrefixFeaturesOutMixin, metaclass=ABCMeta\n):\n \"\"\"Base class for random projections.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n\n @abstractmethod\n def __init__(\n self, n_components=\"auto\", *, eps=0.1, dense_output=False, random_state=None\n ):\n self.n_components = n_components\n self.eps = eps\n self.dense_output = dense_output\n self.random_state = random_state\n\n @abstractmethod\n def _make_random_matrix(self, n_components, n_features):\n \"\"\"Generate the random projection matrix.\n\n Parameters\n ----------\n n_components : int,\n Dimensionality of the target projection space.\n\n n_features : int,\n Dimensionality of the original source space.\n\n Returns\n -------\n components : {ndarray, sparse matrix} of shape \\\n (n_components, n_features)\n The generated random matrix. 
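# --- Hedged sketch (public-API usage, since _sparse_random_matrix is private): with
# density = 1/3 the fitted components follow the Achlioptas scheme described above,
# i.e. non-zero entries equal +/- sqrt(s) / sqrt(n_components) with s = 1 / density = 3.
import numpy as np
from sklearn.random_projection import SparseRandomProjection

rng_demo = np.random.RandomState(0)
X_demo = rng_demo.rand(50, 1000)
srp_demo = SparseRandomProjection(n_components=100, density=1 / 3.0, random_state=0)
srp_demo.fit(X_demo)
assert np.allclose(np.abs(srp_demo.components_.data), np.sqrt(3) / np.sqrt(100))
print(srp_demo.components_.nnz / (100 * 1000))  # roughly 1/3 of the entries are non-zero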
Sparse matrix will be of CSR format.\n\n \"\"\"\n\n def fit(self, X, y=None):\n \"\"\"Generate a sparse random projection matrix.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training set: only the shape is used to find optimal random\n matrix dimensions based on the theory referenced in the\n afore mentioned papers.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n BaseRandomProjection class instance.\n \"\"\"\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"])\n\n n_samples, n_features = X.shape\n\n if self.n_components == \"auto\":\n self.n_components_ = johnson_lindenstrauss_min_dim(\n n_samples=n_samples, eps=self.eps\n )\n\n if self.n_components_ <= 0:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is invalid\" % (self.eps, n_samples, self.n_components_)\n )\n\n elif self.n_components_ > n_features:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is larger than the original space with \"\n \"n_features=%d\"\n % (self.eps, n_samples, self.n_components_, n_features)\n )\n else:\n if self.n_components <= 0:\n raise ValueError(\n \"n_components must be greater than 0, got %s\" % self.n_components\n )\n\n elif self.n_components > n_features:\n warnings.warn(\n \"The number of components is higher than the number of\"\n \" features: n_features < n_components (%s < %s).\"\n \"The dimensionality of the problem will not be reduced.\"\n % (n_features, self.n_components),\n DataDimensionalityWarning,\n )\n\n self.n_components_ = self.n_components\n\n # Generate a projection matrix of size [n_components, n_features]\n self.components_ = self._make_random_matrix(self.n_components_, n_features)\n\n # Check contract\n assert self.components_.shape == (self.n_components_, n_features), (\n \"An error has occurred the self.components_ matrix has \"\n \" not the proper shape.\"\n )\n\n return self\n\n def transform(self, X):\n \"\"\"Project the data by using matrix product with the random matrix.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n The input data to project into a smaller dimensional space.\n\n Returns\n -------\n X_new : {ndarray, sparse matrix} of shape (n_samples, n_components)\n Projected array.\n \"\"\"\n check_is_fitted(self)\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"], reset=False)\n\n if X.shape[1] != self.components_.shape[1]:\n raise ValueError(\n \"Impossible to perform projection:\"\n \"X at fit stage had a different number of features. \"\n \"(%s != %s)\" % (X.shape[1], self.components_.shape[1])\n )\n\n X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)\n return X_new\n\n @property\n def _n_features_out(self):\n \"\"\"Number of transformed output features.\n\n Used by _ClassNamePrefixFeaturesOutMixin.get_feature_names_out.\n \"\"\"\n return self.n_components\n\n\nclass GaussianRandomProjection(BaseRandomProjection):\n \"\"\"Reduce dimensionality through Gaussian random projection.\n\n The components of the random matrix are drawn from N(0, 1 / n_components).\n\n Read more in the :ref:`User Guide <gaussian_random_matrix>`.\n\n .. 
versionadded:: 0.13\n\n Parameters\n ----------\n n_components : int or 'auto', default='auto'\n Dimensionality of the target projection space.\n\n n_components can be automatically adjusted according to the\n number of samples in the dataset and the bound given by the\n Johnson-Lindenstrauss lemma. In that case the quality of the\n embedding is controlled by the ``eps`` parameter.\n\n It should be noted that Johnson-Lindenstrauss lemma can yield\n very conservative estimated of the required number of components\n as it makes no assumption on the structure of the dataset.\n\n eps : float, default=0.1\n Parameter to control the quality of the embedding according to\n the Johnson-Lindenstrauss lemma when `n_components` is set to\n 'auto'. The value should be strictly positive.\n\n Smaller values lead to better embedding and higher number of\n dimensions (n_components) in the target projection space.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo random number generator used to generate the\n projection matrix at fit time.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n n_components_ : int\n Concrete number of components computed when n_components=\"auto\".\n\n components_ : ndarray of shape (n_components, n_features)\n Random matrix used for the projection.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n SparseRandomProjection : Reduce dimensionality through sparse\n random projection.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.random_projection import GaussianRandomProjection\n >>> rng = np.random.RandomState(42)\n >>> X = rng.rand(25, 3000)\n >>> transformer = GaussianRandomProjection(random_state=rng)\n >>> X_new = transformer.fit_transform(X)\n >>> X_new.shape\n (25, 2759)\n \"\"\"\n\n def __init__(self, n_components=\"auto\", *, eps=0.1, random_state=None):\n super().__init__(\n n_components=n_components,\n eps=eps,\n dense_output=True,\n random_state=random_state,\n )\n\n def _make_random_matrix(self, n_components, n_features):\n \"\"\" Generate the random projection matrix.\n\n Parameters\n ----------\n n_components : int,\n Dimensionality of the target projection space.\n\n n_features : int,\n Dimensionality of the original source space.\n\n Returns\n -------\n components : {ndarray, sparse matrix} of shape \\\n (n_components, n_features)\n The generated random matrix. 
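# --- Hedged sketch (not part of the class): pairwise squared distances after a Gaussian
# projection typically stay within the (1 - eps, 1 + eps) band promised by the lemma.
import numpy as np
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import GaussianRandomProjection

rng_demo = np.random.RandomState(42)
X_demo = rng_demo.rand(25, 3000)
X_proj = GaussianRandomProjection(eps=0.5, random_state=42).fit_transform(X_demo)
mask = ~np.eye(len(X_demo), dtype=bool)
ratios = euclidean_distances(X_proj)[mask] ** 2 / euclidean_distances(X_demo)[mask] ** 2
print(ratios.min(), ratios.max())  # usually inside (0.5, 1.5) for eps = 0.5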
Sparse matrix will be of CSR format.\n\n \"\"\"\n random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=random_state\n )\n\n\nclass SparseRandomProjection(BaseRandomProjection):\n \"\"\"Reduce dimensionality through sparse random projection.\n\n Sparse random matrix is an alternative to dense random\n projection matrix that guarantees similar embedding quality while being\n much more memory efficient and allowing faster computation of the\n projected data.\n\n If we note `s = 1 / density` the components of the random matrix are\n drawn from:\n\n - -sqrt(s) / sqrt(n_components) with probability 1 / 2s\n - 0 with probability 1 - 1 / s\n - +sqrt(s) / sqrt(n_components) with probability 1 / 2s\n\n Read more in the :ref:`User Guide <sparse_random_matrix>`.\n\n .. versionadded:: 0.13\n\n Parameters\n ----------\n n_components : int or 'auto', default='auto'\n Dimensionality of the target projection space.\n\n n_components can be automatically adjusted according to the\n number of samples in the dataset and the bound given by the\n Johnson-Lindenstrauss lemma. In that case the quality of the\n embedding is controlled by the ``eps`` parameter.\n\n It should be noted that Johnson-Lindenstrauss lemma can yield\n very conservative estimated of the required number of components\n as it makes no assumption on the structure of the dataset.\n\n density : float or 'auto', default='auto'\n Ratio in the range (0, 1] of non-zero component in the random\n projection matrix.\n\n If density = 'auto', the value is set to the minimum density\n as recommended by Ping Li et al.: 1 / sqrt(n_features).\n\n Use density = 1 / 3.0 if you want to reproduce the results from\n Achlioptas, 2001.\n\n eps : float, default=0.1\n Parameter to control the quality of the embedding according to\n the Johnson-Lindenstrauss lemma when n_components is set to\n 'auto'. This value should be strictly positive.\n\n Smaller values lead to better embedding and higher number of\n dimensions (n_components) in the target projection space.\n\n dense_output : bool, default=False\n If True, ensure that the output of the random projection is a\n dense numpy array even if the input and random projection matrix\n are both sparse. In practice, if the number of components is\n small the number of zero components in the projected data will\n be very small and it will be more CPU and memory efficient to\n use a dense representation.\n\n If False, the projected data uses a sparse representation if\n the input is sparse.\n\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo random number generator used to generate the\n projection matrix at fit time.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Attributes\n ----------\n n_components_ : int\n Concrete number of components computed when n_components=\"auto\".\n\n components_ : sparse matrix of shape (n_components, n_features)\n Random matrix used for the projection. Sparse matrix will be of CSR\n format.\n\n density_ : float in range 0.0 - 1.0\n Concrete density computed from when density = \"auto\".\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. 
versionadded:: 1.0\n\n See Also\n --------\n GaussianRandomProjection : Reduce dimensionality through Gaussian\n random projection.\n\n References\n ----------\n\n .. [1] Ping Li, T. Hastie and K. W. Church, 2006,\n \"Very Sparse Random Projections\".\n https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf\n\n .. [2] D. Achlioptas, 2001, \"Database-friendly random projections\",\n https://users.soe.ucsc.edu/~optas/papers/jl.pdf\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.random_projection import SparseRandomProjection\n >>> rng = np.random.RandomState(42)\n >>> X = rng.rand(25, 3000)\n >>> transformer = SparseRandomProjection(random_state=rng)\n >>> X_new = transformer.fit_transform(X)\n >>> X_new.shape\n (25, 2759)\n >>> # very few components are non-zero\n >>> np.mean(transformer.components_ != 0)\n 0.0182...\n \"\"\"\n\n def __init__(\n self,\n n_components=\"auto\",\n *,\n density=\"auto\",\n eps=0.1,\n dense_output=False,\n random_state=None,\n ):\n super().__init__(\n n_components=n_components,\n eps=eps,\n dense_output=dense_output,\n random_state=random_state,\n )\n\n self.density = density\n\n def _make_random_matrix(self, n_components, n_features):\n \"\"\" Generate the random projection matrix\n\n Parameters\n ----------\n n_components : int\n Dimensionality of the target projection space.\n\n n_features : int\n Dimensionality of the original source space.\n\n Returns\n -------\n components : {ndarray, sparse matrix} of shape \\\n (n_components, n_features)\n The generated random matrix. Sparse matrix will be of CSR format.\n\n \"\"\"\n random_state = check_random_state(self.random_state)\n self.density_ = _check_density(self.density, n_features)\n return _sparse_random_matrix(\n n_components, n_features, density=self.density_, random_state=random_state\n )\n", "\"\"\"\nLinear Discriminant Analysis and Quadratic Discriminant Analysis\n\"\"\"\n\n# Authors: Clemens Brunner\n# Martin Billinger\n# Matthieu Perrot\n# Mathieu Blondel\n\n# License: BSD 3-Clause\n\nimport warnings\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.special import expit\n\nfrom .base import BaseEstimator, TransformerMixin, ClassifierMixin\nfrom .linear_model._base import LinearClassifierMixin\nfrom .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance\nfrom .utils.multiclass import unique_labels\nfrom .utils.validation import check_is_fitted\nfrom .utils.multiclass import check_classification_targets\nfrom .utils.extmath import softmax\nfrom .preprocessing import StandardScaler\n\n\n__all__ = [\"LinearDiscriminantAnalysis\", \"QuadraticDiscriminantAnalysis\"]\n\n\ndef _cov(X, shrinkage=None, covariance_estimator=None):\n \"\"\"Estimate covariance matrix (using optional covariance_estimator).\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n shrinkage : {'empirical', 'auto'} or float, default=None\n Shrinkage parameter, possible values:\n - None or 'empirical': no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Shrinkage parameter is ignored if `covariance_estimator`\n is not None.\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying on the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in 
:mod:`sklearn.covariance``.\n if None the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n s : ndarray of shape (n_features, n_features)\n Estimated covariance matrix.\n \"\"\"\n if covariance_estimator is None:\n shrinkage = \"empirical\" if shrinkage is None else shrinkage\n if isinstance(shrinkage, str):\n if shrinkage == \"auto\":\n sc = StandardScaler() # standardize features\n X = sc.fit_transform(X)\n s = ledoit_wolf(X)[0]\n # rescale\n s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]\n elif shrinkage == \"empirical\":\n s = empirical_covariance(X)\n else:\n raise ValueError(\"unknown shrinkage parameter\")\n elif isinstance(shrinkage, float) or isinstance(shrinkage, int):\n if shrinkage < 0 or shrinkage > 1:\n raise ValueError(\"shrinkage parameter must be between 0 and 1\")\n s = shrunk_covariance(empirical_covariance(X), shrinkage)\n else:\n raise TypeError(\"shrinkage must be a float or a string\")\n else:\n if shrinkage is not None and shrinkage != 0:\n raise ValueError(\n \"covariance_estimator and shrinkage parameters \"\n \"are not None. Only one of the two can be set.\"\n )\n covariance_estimator.fit(X)\n if not hasattr(covariance_estimator, \"covariance_\"):\n raise ValueError(\n \"%s does not have a covariance_ attribute\"\n % covariance_estimator.__class__.__name__\n )\n s = covariance_estimator.covariance_\n return s\n\n\ndef _class_means(X, y):\n \"\"\"Compute class means.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Returns\n -------\n means : array-like of shape (n_classes, n_features)\n Class means.\n \"\"\"\n classes, y = np.unique(y, return_inverse=True)\n cnt = np.bincount(y)\n means = np.zeros(shape=(len(classes), X.shape[1]))\n np.add.at(means, y, X)\n means /= cnt[:, None]\n return means\n\n\ndef _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None):\n \"\"\"Compute weighted within-class covariance matrix.\n\n The per-class covariance are weighted by the class priors.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n priors : array-like of shape (n_classes,)\n Class priors.\n\n shrinkage : 'auto' or float, default=None\n Shrinkage parameter, possible values:\n - None: no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Shrinkage parameter is ignored if `covariance_estimator` is not None.\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in sklearn.covariance.\n If None, the shrinkage parameter drives the estimate.\n\n .. 
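# --- Hedged sketch of what a float `shrinkage` means in _cov above: the empirical
# covariance is blended with a scaled identity, mirroring
# sklearn.covariance.shrunk_covariance.
import numpy as np
from sklearn.covariance import empirical_covariance, shrunk_covariance

rng_demo = np.random.RandomState(0)
X_demo = rng_demo.randn(100, 3)
emp = empirical_covariance(X_demo)
alpha = 0.2
mu = np.trace(emp) / emp.shape[0]
blended = (1 - alpha) * emp + alpha * mu * np.eye(emp.shape[0])
assert np.allclose(blended, shrunk_covariance(emp, alpha))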
versionadded:: 0.24\n\n Returns\n -------\n cov : array-like of shape (n_features, n_features)\n Weighted within-class covariance matrix\n \"\"\"\n classes = np.unique(y)\n cov = np.zeros(shape=(X.shape[1], X.shape[1]))\n for idx, group in enumerate(classes):\n Xg = X[y == group, :]\n cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator))\n return cov\n\n\nclass LinearDiscriminantAnalysis(\n LinearClassifierMixin, TransformerMixin, BaseEstimator\n):\n \"\"\"Linear Discriminant Analysis.\n\n A classifier with a linear decision boundary, generated by fitting class\n conditional densities to the data and using Bayes' rule.\n\n The model fits a Gaussian density to each class, assuming that all classes\n share the same covariance matrix.\n\n The fitted model can also be used to reduce the dimensionality of the input\n by projecting it to the most discriminative directions, using the\n `transform` method.\n\n .. versionadded:: 0.17\n *LinearDiscriminantAnalysis*.\n\n Read more in the :ref:`User Guide <lda_qda>`.\n\n Parameters\n ----------\n solver : {'svd', 'lsqr', 'eigen'}, default='svd'\n Solver to use, possible values:\n - 'svd': Singular value decomposition (default).\n Does not compute the covariance matrix, therefore this solver is\n recommended for data with a large number of features.\n - 'lsqr': Least squares solution.\n Can be combined with shrinkage or custom covariance estimator.\n - 'eigen': Eigenvalue decomposition.\n Can be combined with shrinkage or custom covariance estimator.\n\n shrinkage : 'auto' or float, default=None\n Shrinkage parameter, possible values:\n - None: no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n This should be left to None if `covariance_estimator` is used.\n Note that shrinkage works only with 'lsqr' and 'eigen' solvers.\n\n priors : array-like of shape (n_classes,), default=None\n The class prior probabilities. By default, the class proportions are\n inferred from the training data.\n\n n_components : int, default=None\n Number of components (<= min(n_classes - 1, n_features)) for\n dimensionality reduction. If None, will be set to\n min(n_classes - 1, n_features). This parameter only affects the\n `transform` method.\n\n store_covariance : bool, default=False\n If True, explicitly compute the weighted within-class covariance\n matrix when solver is 'svd'. The matrix is always computed\n and stored for the other solvers.\n\n .. versionadded:: 0.17\n\n tol : float, default=1.0e-4\n Absolute threshold for a singular value of X to be considered\n significant, used to estimate the rank of X. Dimensions whose\n singular values are non-significant are discarded. Only used if\n solver is 'svd'.\n\n .. versionadded:: 0.17\n\n covariance_estimator : covariance estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying on the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in :mod:`sklearn.covariance`.\n if None the shrinkage parameter drives the estimate.\n\n This should be left to None if `shrinkage` is used.\n Note that `covariance_estimator` works only with 'lsqr' and 'eigen'\n solvers.\n\n .. 
versionadded:: 0.24\n\n Attributes\n ----------\n coef_ : ndarray of shape (n_features,) or (n_classes, n_features)\n Weight vector(s).\n\n intercept_ : ndarray of shape (n_classes,)\n Intercept term.\n\n covariance_ : array-like of shape (n_features, n_features)\n Weighted within-class covariance matrix. It corresponds to\n `sum_k prior_k * C_k` where `C_k` is the covariance matrix of the\n samples in class `k`. The `C_k` are estimated using the (potentially\n shrunk) biased estimator of covariance. If solver is 'svd', only\n exists when `store_covariance` is True.\n\n explained_variance_ratio_ : ndarray of shape (n_components,)\n Percentage of variance explained by each of the selected components.\n If ``n_components`` is not set then all components are stored and the\n sum of explained variances is equal to 1.0. Only available when eigen\n or svd solver is used.\n\n means_ : array-like of shape (n_classes, n_features)\n Class-wise means.\n\n priors_ : array-like of shape (n_classes,)\n Class priors (sum to 1).\n\n scalings_ : array-like of shape (rank, n_classes - 1)\n Scaling of the features in the space spanned by the class centroids.\n Only available for 'svd' and 'eigen' solvers.\n\n xbar_ : array-like of shape (n_features,)\n Overall mean. Only present if solver is 'svd'.\n\n classes_ : array-like of shape (n_classes,)\n Unique class labels.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> y = np.array([1, 1, 1, 2, 2, 2])\n >>> clf = LinearDiscriminantAnalysis()\n >>> clf.fit(X, y)\n LinearDiscriminantAnalysis()\n >>> print(clf.predict([[-0.8, -1]]))\n [1]\n \"\"\"\n\n def __init__(\n self,\n solver=\"svd\",\n shrinkage=None,\n priors=None,\n n_components=None,\n store_covariance=False,\n tol=1e-4,\n covariance_estimator=None,\n ):\n self.solver = solver\n self.shrinkage = shrinkage\n self.priors = priors\n self.n_components = n_components\n self.store_covariance = store_covariance # used only in svd solver\n self.tol = tol # used only in svd solver\n self.covariance_estimator = covariance_estimator\n\n def _solve_lsqr(self, X, y, shrinkage, covariance_estimator):\n \"\"\"Least squares solver.\n\n The least squares solver computes a straightforward solution of the\n optimal decision rule based directly on the discriminant functions. It\n can only be used for classification (with any covariance estimator),\n because\n estimation of eigenvectors is not performed. 
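# --- Hedged usage sketch (from outside this module; data and names are mine): the
# 'lsqr' solver combined with Ledoit-Wolf shrinkage classifies, but it cannot be used
# for dimensionality reduction via transform().
from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X_demo, y_demo = make_classification(n_samples=200, n_features=10, random_state=0)
lda_demo = LinearDiscriminantAnalysis(solver="lsqr", shrinkage="auto").fit(X_demo, y_demo)
print(lda_demo.score(X_demo, y_demo))
# lda_demo.transform(X_demo) would raise NotImplementedError for this solver.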
Therefore, dimensionality\n reduction with the transform is not supported.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_classes)\n Target values.\n\n shrinkage : 'auto', float or None\n Shrinkage parameter, possible values:\n - None: no shrinkage.\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Shrinkage parameter is ignored if `covariance_estimator` i\n not None\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in sklearn.covariance.\n if None the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Notes\n -----\n This solver is based on [1]_, section 2.6.2, pp. 39-41.\n\n References\n ----------\n .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification\n (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN\n 0-471-05669-3.\n \"\"\"\n self.means_ = _class_means(X, y)\n self.covariance_ = _class_cov(\n X, y, self.priors_, shrinkage, covariance_estimator\n )\n self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T\n self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(\n self.priors_\n )\n\n def _solve_eigen(self, X, y, shrinkage, covariance_estimator):\n \"\"\"Eigenvalue solver.\n\n The eigenvalue solver computes the optimal solution of the Rayleigh\n coefficient (basically the ratio of between class scatter to within\n class scatter). This solver supports both classification and\n dimensionality reduction (with any covariance estimator).\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n shrinkage : 'auto', float or None\n Shrinkage parameter, possible values:\n - None: no shrinkage.\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage constant.\n\n Shrinkage parameter is ignored if `covariance_estimator` i\n not None\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in sklearn.covariance.\n if None the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Notes\n -----\n This solver is based on [1]_, section 3.8.3, pp. 121-124.\n\n References\n ----------\n .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification\n (Second Edition). John Wiley & Sons, Inc., New York, 2001. 
ISBN\n 0-471-05669-3.\n \"\"\"\n self.means_ = _class_means(X, y)\n self.covariance_ = _class_cov(\n X, y, self.priors_, shrinkage, covariance_estimator\n )\n\n Sw = self.covariance_ # within scatter\n St = _cov(X, shrinkage, covariance_estimator) # total scatter\n Sb = St - Sw # between scatter\n\n evals, evecs = linalg.eigh(Sb, Sw)\n self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][\n : self._max_components\n ]\n evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors\n\n self.scalings_ = evecs\n self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)\n self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(\n self.priors_\n )\n\n def _solve_svd(self, X, y):\n \"\"\"SVD solver.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n \"\"\"\n n_samples, n_features = X.shape\n n_classes = len(self.classes_)\n\n self.means_ = _class_means(X, y)\n if self.store_covariance:\n self.covariance_ = _class_cov(X, y, self.priors_)\n\n Xc = []\n for idx, group in enumerate(self.classes_):\n Xg = X[y == group, :]\n Xc.append(Xg - self.means_[idx])\n\n self.xbar_ = np.dot(self.priors_, self.means_)\n\n Xc = np.concatenate(Xc, axis=0)\n\n # 1) within (univariate) scaling by with classes std-dev\n std = Xc.std(axis=0)\n # avoid division by zero in normalization\n std[std == 0] = 1.0\n fac = 1.0 / (n_samples - n_classes)\n\n # 2) Within variance scaling\n X = np.sqrt(fac) * (Xc / std)\n # SVD of centered (within)scaled data\n U, S, Vt = linalg.svd(X, full_matrices=False)\n\n rank = np.sum(S > self.tol)\n # Scaling of within covariance is: V' 1/S\n scalings = (Vt[:rank] / std).T / S[:rank]\n\n # 3) Between variance scaling\n # Scale weighted centers\n X = np.dot(\n (\n (np.sqrt((n_samples * self.priors_) * fac))\n * (self.means_ - self.xbar_).T\n ).T,\n scalings,\n )\n # Centers are living in a space with n_classes-1 dim (maximum)\n # Use SVD to find projection in the space spanned by the\n # (n_classes) centers\n _, S, Vt = linalg.svd(X, full_matrices=0)\n\n if self._max_components == 0:\n self.explained_variance_ratio_ = np.empty((0,), dtype=S.dtype)\n else:\n self.explained_variance_ratio_ = (S ** 2 / np.sum(S ** 2))[\n : self._max_components\n ]\n\n rank = np.sum(S > self.tol * S[0])\n self.scalings_ = np.dot(scalings, Vt.T[:, :rank])\n coef = np.dot(self.means_ - self.xbar_, self.scalings_)\n self.intercept_ = -0.5 * np.sum(coef ** 2, axis=1) + np.log(self.priors_)\n self.coef_ = np.dot(coef, self.scalings_.T)\n self.intercept_ -= np.dot(self.xbar_, self.coef_.T)\n\n def fit(self, X, y):\n \"\"\"Fit the Linear Discriminant Analysis model.\n\n .. versionchanged:: 0.19\n *store_covariance* has been moved to main constructor.\n\n .. 
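# --- Hedged usage sketch (from outside this module): with the default 'svd' solver the
# fitted model can also project data onto at most min(n_classes - 1, n_features) axes.
from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X_iris, y_iris = load_iris(return_X_y=True)
lda_iris = LinearDiscriminantAnalysis(n_components=2).fit(X_iris, y_iris)
print(lda_iris.transform(X_iris).shape)          # (150, 2)
print(lda_iris.explained_variance_ratio_.sum())  # ~1.0 when every possible axis is kept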
versionchanged:: 0.19\n *tol* has been moved to main constructor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n X, y = self._validate_data(\n X, y, ensure_min_samples=2, dtype=[np.float64, np.float32]\n )\n self.classes_ = unique_labels(y)\n n_samples, _ = X.shape\n n_classes = len(self.classes_)\n\n if n_samples == n_classes:\n raise ValueError(\n \"The number of samples must be more than the number of classes.\"\n )\n\n if self.priors is None: # estimate priors from sample\n _, y_t = np.unique(y, return_inverse=True) # non-negative ints\n self.priors_ = np.bincount(y_t) / float(len(y))\n else:\n self.priors_ = np.asarray(self.priors)\n\n if (self.priors_ < 0).any():\n raise ValueError(\"priors must be non-negative\")\n if not np.isclose(self.priors_.sum(), 1.0):\n warnings.warn(\"The priors do not sum to 1. Renormalizing\", UserWarning)\n self.priors_ = self.priors_ / self.priors_.sum()\n\n # Maximum number of components no matter what n_components is\n # specified:\n max_components = min(len(self.classes_) - 1, X.shape[1])\n\n if self.n_components is None:\n self._max_components = max_components\n else:\n if self.n_components > max_components:\n raise ValueError(\n \"n_components cannot be larger than min(n_features, n_classes - 1).\"\n )\n self._max_components = self.n_components\n\n if self.solver == \"svd\":\n if self.shrinkage is not None:\n raise NotImplementedError(\"shrinkage not supported\")\n if self.covariance_estimator is not None:\n raise ValueError(\n \"covariance estimator \"\n \"is not supported \"\n \"with svd solver. Try another solver\"\n )\n self._solve_svd(X, y)\n elif self.solver == \"lsqr\":\n self._solve_lsqr(\n X,\n y,\n shrinkage=self.shrinkage,\n covariance_estimator=self.covariance_estimator,\n )\n elif self.solver == \"eigen\":\n self._solve_eigen(\n X,\n y,\n shrinkage=self.shrinkage,\n covariance_estimator=self.covariance_estimator,\n )\n else:\n raise ValueError(\n \"unknown solver {} (valid solvers are 'svd', \"\n \"'lsqr', and 'eigen').\".format(self.solver)\n )\n if self.classes_.size == 2: # treat binary case as a special case\n self.coef_ = np.array(\n self.coef_[1, :] - self.coef_[0, :], ndmin=2, dtype=X.dtype\n )\n self.intercept_ = np.array(\n self.intercept_[1] - self.intercept_[0], ndmin=1, dtype=X.dtype\n )\n return self\n\n def transform(self, X):\n \"\"\"Project data to maximize class separation.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Transformed data.\n \"\"\"\n if self.solver == \"lsqr\":\n raise NotImplementedError(\n \"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen').\"\n )\n check_is_fitted(self)\n\n X = self._validate_data(X, reset=False)\n if self.solver == \"svd\":\n X_new = np.dot(X - self.xbar_, self.scalings_)\n elif self.solver == \"eigen\":\n X_new = np.dot(X, self.scalings_)\n\n return X_new[:, : self._max_components]\n\n def predict_proba(self, X):\n \"\"\"Estimate probability.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n C : ndarray of shape (n_samples, n_classes)\n Estimated probabilities.\n \"\"\"\n check_is_fitted(self)\n\n decision = self.decision_function(X)\n if self.classes_.size == 2:\n proba = expit(decision)\n return np.vstack([1 
- proba, proba]).T\n else:\n return softmax(decision)\n\n def predict_log_proba(self, X):\n \"\"\"Estimate log probability.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n C : ndarray of shape (n_samples, n_classes)\n Estimated log probabilities.\n \"\"\"\n prediction = self.predict_proba(X)\n prediction[prediction == 0.0] += np.finfo(prediction.dtype).tiny\n return np.log(prediction)\n\n def decision_function(self, X):\n \"\"\"Apply decision function to an array of samples.\n\n The decision function is equal (up to a constant factor) to the\n log-posterior of the model, i.e. `log p(y = k | x)`. In a binary\n classification setting this instead corresponds to the difference\n `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Array of samples (test vectors).\n\n Returns\n -------\n C : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Decision function values related to each class, per sample.\n In the two-class case, the shape is (n_samples,), giving the\n log likelihood ratio of the positive class.\n \"\"\"\n # Only override for the doc\n return super().decision_function(X)\n\n\nclass QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator):\n \"\"\"Quadratic Discriminant Analysis.\n\n A classifier with a quadratic decision boundary, generated\n by fitting class conditional densities to the data\n and using Bayes' rule.\n\n The model fits a Gaussian density to each class.\n\n .. versionadded:: 0.17\n *QuadraticDiscriminantAnalysis*\n\n Read more in the :ref:`User Guide <lda_qda>`.\n\n Parameters\n ----------\n priors : ndarray of shape (n_classes,), default=None\n Class priors. By default, the class proportions are inferred from the\n training data.\n\n reg_param : float, default=0.0\n Regularizes the per-class covariance estimates by transforming S2 as\n ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``,\n where S2 corresponds to the `scaling_` attribute of a given class.\n\n store_covariance : bool, default=False\n If True, the class covariance matrices are explicitly computed and\n stored in the `self.covariance_` attribute.\n\n .. versionadded:: 0.17\n\n tol : float, default=1.0e-4\n Absolute threshold for a singular value to be considered significant,\n used to estimate the rank of `Xk` where `Xk` is the centered matrix\n of samples in class k. This parameter does not affect the\n predictions. It only controls a warning that is raised when features\n are considered to be colinear.\n\n .. versionadded:: 0.17\n\n Attributes\n ----------\n covariance_ : list of len n_classes of ndarray \\\n of shape (n_features, n_features)\n For each class, gives the covariance matrix estimated using the\n samples of that class. The estimations are unbiased. Only present if\n `store_covariance` is True.\n\n means_ : array-like of shape (n_classes, n_features)\n Class-wise means.\n\n priors_ : array-like of shape (n_classes,)\n Class priors (sum to 1).\n\n rotations_ : list of len n_classes of ndarray of shape (n_features, n_k)\n For each class k an array of shape (n_features, n_k), where\n ``n_k = min(n_features, number of elements in class k)``\n It is the rotation of the Gaussian distribution, i.e. its\n principal axis. 
It corresponds to `V`, the matrix of eigenvectors\n coming from the SVD of `Xk = U S Vt` where `Xk` is the centered\n matrix of samples from class k.\n\n scalings_ : list of len n_classes of ndarray of shape (n_k,)\n For each class, contains the scaling of\n the Gaussian distributions along its principal axes, i.e. the\n variance in the rotated coordinate system. It corresponds to `S^2 /\n (n_samples - 1)`, where `S` is the diagonal matrix of singular values\n from the SVD of `Xk`, where `Xk` is the centered matrix of samples\n from class k.\n\n classes_ : ndarray of shape (n_classes,)\n Unique class labels.\n\n n_features_in_ : int\n Number of features seen during :term:`fit`.\n\n .. versionadded:: 0.24\n\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\n Names of features seen during :term:`fit`. Defined only when `X`\n has feature names that are all strings.\n\n .. versionadded:: 1.0\n\n See Also\n --------\n LinearDiscriminantAnalysis : Linear Discriminant Analysis.\n\n Examples\n --------\n >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n >>> import numpy as np\n >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> y = np.array([1, 1, 1, 2, 2, 2])\n >>> clf = QuadraticDiscriminantAnalysis()\n >>> clf.fit(X, y)\n QuadraticDiscriminantAnalysis()\n >>> print(clf.predict([[-0.8, -1]]))\n [1]\n \"\"\"\n\n def __init__(\n self, *, priors=None, reg_param=0.0, store_covariance=False, tol=1.0e-4\n ):\n self.priors = np.asarray(priors) if priors is not None else None\n self.reg_param = reg_param\n self.store_covariance = store_covariance\n self.tol = tol\n\n def fit(self, X, y):\n \"\"\"Fit the model according to the given training data and parameters.\n\n .. versionchanged:: 0.19\n ``store_covariances`` has been moved to main constructor as\n ``store_covariance``\n\n .. 
versionchanged:: 0.19\n ``tol`` has been moved to main constructor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target values (integers).\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n X, y = self._validate_data(X, y)\n check_classification_targets(y)\n self.classes_, y = np.unique(y, return_inverse=True)\n n_samples, n_features = X.shape\n n_classes = len(self.classes_)\n if n_classes < 2:\n raise ValueError(\n \"The number of classes has to be greater than one; got %d class\"\n % (n_classes)\n )\n if self.priors is None:\n self.priors_ = np.bincount(y) / float(n_samples)\n else:\n self.priors_ = self.priors\n\n cov = None\n store_covariance = self.store_covariance\n if store_covariance:\n cov = []\n means = []\n scalings = []\n rotations = []\n for ind in range(n_classes):\n Xg = X[y == ind, :]\n meang = Xg.mean(0)\n means.append(meang)\n if len(Xg) == 1:\n raise ValueError(\n \"y has only 1 sample in class %s, covariance is ill defined.\"\n % str(self.classes_[ind])\n )\n Xgc = Xg - meang\n # Xgc = U * S * V.T\n _, S, Vt = np.linalg.svd(Xgc, full_matrices=False)\n rank = np.sum(S > self.tol)\n if rank < n_features:\n warnings.warn(\"Variables are collinear\")\n S2 = (S ** 2) / (len(Xg) - 1)\n S2 = ((1 - self.reg_param) * S2) + self.reg_param\n if self.store_covariance or store_covariance:\n # cov = V * (S^2 / (n-1)) * V.T\n cov.append(np.dot(S2 * Vt.T, Vt))\n scalings.append(S2)\n rotations.append(Vt.T)\n if self.store_covariance or store_covariance:\n self.covariance_ = cov\n self.means_ = np.asarray(means)\n self.scalings_ = scalings\n self.rotations_ = rotations\n return self\n\n def _decision_function(self, X):\n # return log posterior, see eq (4.12) p. 110 of the ESL.\n check_is_fitted(self)\n\n X = self._validate_data(X, reset=False)\n norm2 = []\n for i in range(len(self.classes_)):\n R = self.rotations_[i]\n S = self.scalings_[i]\n Xm = X - self.means_[i]\n X2 = np.dot(Xm, R * (S ** (-0.5)))\n norm2.append(np.sum(X2 ** 2, axis=1))\n norm2 = np.array(norm2).T # shape = [len(X), n_classes]\n u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])\n return -0.5 * (norm2 + u) + np.log(self.priors_)\n\n def decision_function(self, X):\n \"\"\"Apply decision function to an array of samples.\n\n The decision function is equal (up to a constant factor) to the\n log-posterior of the model, i.e. `log p(y = k | x)`. In a binary\n classification setting this instead corresponds to the difference\n `log p(y = 1 | x) - log p(y = 0 | x)`. 
See :ref:`lda_qda_math`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Array of samples (test vectors).\n\n Returns\n -------\n C : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Decision function values related to each class, per sample.\n In the two-class case, the shape is (n_samples,), giving the\n log likelihood ratio of the positive class.\n \"\"\"\n dec_func = self._decision_function(X)\n # handle special case of two classes\n if len(self.classes_) == 2:\n return dec_func[:, 1] - dec_func[:, 0]\n return dec_func\n\n def predict(self, X):\n \"\"\"Perform classification on an array of test vectors X.\n\n The predicted class C for each sample in X is returned.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Vector to be scored, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n C : ndarray of shape (n_samples,)\n Estimated probabilities.\n \"\"\"\n d = self._decision_function(X)\n y_pred = self.classes_.take(d.argmax(1))\n return y_pred\n\n def predict_proba(self, X):\n \"\"\"Return posterior probabilities of classification.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Array of samples/test vectors.\n\n Returns\n -------\n C : ndarray of shape (n_samples, n_classes)\n Posterior probabilities of classification per class.\n \"\"\"\n values = self._decision_function(X)\n # compute the likelihood of the underlying gaussian models\n # up to a multiplicative constant.\n likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])\n # compute posterior probabilities\n return likelihood / likelihood.sum(axis=1)[:, np.newaxis]\n\n def predict_log_proba(self, X):\n \"\"\"Return log of posterior probabilities of classification.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Array of samples/test vectors.\n\n Returns\n -------\n C : ndarray of shape (n_samples, n_classes)\n Posterior log-probabilities of classification per class.\n \"\"\"\n # XXX : can do better to avoid precision overflows\n probas_ = self.predict_proba(X)\n return np.log(probas_)\n", "\"\"\"Modified Olivetti faces dataset.\n\nThe original database was available from (now defunct)\n\n https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html\n\nThe version retrieved here comes in MATLAB format from the personal\nweb page of Sam Roweis:\n\n https://cs.nyu.edu/~roweis/\n\"\"\"\n\n# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>\n# License: BSD 3 clause\n\nfrom os.path import exists\nfrom os import makedirs, remove\n\nimport numpy as np\nfrom scipy.io.matlab import loadmat\nimport joblib\n\nfrom . 
import get_data_home\nfrom ._base import _fetch_remote\nfrom ._base import RemoteFileMetadata\nfrom ._base import _pkl_filepath\nfrom ._base import load_descr\nfrom ..utils import check_random_state, Bunch\n\n# The original data can be found at:\n# https://cs.nyu.edu/~roweis/data/olivettifaces.mat\nFACES = RemoteFileMetadata(\n filename=\"olivettifaces.mat\",\n url=\"https://ndownloader.figshare.com/files/5976027\",\n checksum=\"b612fb967f2dc77c9c62d3e1266e0c73d5fca46a4b8906c18e454d41af987794\",\n)\n\n\ndef fetch_olivetti_faces(\n *,\n data_home=None,\n shuffle=False,\n random_state=0,\n download_if_missing=True,\n return_X_y=False,\n):\n \"\"\"Load the Olivetti faces data-set from AT&T (classification).\n\n Download it if necessary.\n\n ================= =====================\n Classes 40\n Samples total 400\n Dimensionality 4096\n Features real, between 0 and 1\n ================= =====================\n\n Read more in the :ref:`User Guide <olivetti_faces_dataset>`.\n\n Parameters\n ----------\n data_home : str, default=None\n Specify another download and cache folder for the datasets. By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n shuffle : bool, default=False\n If True the order of the dataset is shuffled to avoid having\n images of the same person grouped.\n\n random_state : int, RandomState instance or None, default=0\n Determines random number generation for dataset shuffling. Pass an int\n for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n download_if_missing : bool, default=True\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n return_X_y : bool, default=False\n If True, returns `(data, target)` instead of a `Bunch` object. See\n below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.22\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data: ndarray, shape (400, 4096)\n Each row corresponds to a ravelled\n face image of original size 64 x 64 pixels.\n images : ndarray, shape (400, 64, 64)\n Each row is a face image\n corresponding to one of the 40 subjects of the dataset.\n target : ndarray, shape (400,)\n Labels associated to each face image.\n Those labels are ranging from 0-39 and correspond to the\n Subject IDs.\n DESCR : str\n Description of the modified Olivetti Faces Dataset.\n\n (data, target) : tuple if `return_X_y=True`\n .. 
versionadded:: 0.22\n \"\"\"\n data_home = get_data_home(data_home=data_home)\n if not exists(data_home):\n makedirs(data_home)\n filepath = _pkl_filepath(data_home, \"olivetti.pkz\")\n if not exists(filepath):\n if not download_if_missing:\n raise IOError(\"Data not found and `download_if_missing` is False\")\n\n print(\"downloading Olivetti faces from %s to %s\" % (FACES.url, data_home))\n mat_path = _fetch_remote(FACES, dirname=data_home)\n mfile = loadmat(file_name=mat_path)\n # delete raw .mat data\n remove(mat_path)\n\n faces = mfile[\"faces\"].T.copy()\n joblib.dump(faces, filepath, compress=6)\n del mfile\n else:\n faces = joblib.load(filepath)\n\n # We want floating point data, but float32 is enough (there is only\n # one byte of precision in the original uint8s anyway)\n faces = np.float32(faces)\n faces = faces - faces.min()\n faces /= faces.max()\n faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)\n # 10 images per class, 400 images total, each class is contiguous.\n target = np.array([i // 10 for i in range(400)])\n if shuffle:\n random_state = check_random_state(random_state)\n order = random_state.permutation(len(faces))\n faces = faces[order]\n target = target[order]\n faces_vectorized = faces.reshape(len(faces), -1)\n\n fdescr = load_descr(\"olivetti_faces.rst\")\n\n if return_X_y:\n return faces_vectorized, target\n\n return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr)\n", "r\"\"\"\n=====================================\nMulti-class AdaBoosted Decision Trees\n=====================================\n\nThis example reproduces Figure 1 of Zhu et al [1]_ and shows how boosting can\nimprove prediction accuracy on a multi-class problem. The classification\ndataset is constructed by taking a ten-dimensional standard normal distribution\nand defining three classes separated by nested concentric ten-dimensional\nspheres such that roughly equal numbers of samples are in each class (quantiles\nof the :math:`\\chi^2` distribution).\n\nThe performance of the SAMME and SAMME.R [1]_ algorithms are compared. SAMME.R\nuses the probability estimates to update the additive model, while SAMME uses\nthe classifications only. As the example illustrates, the SAMME.R algorithm\ntypically converges faster than SAMME, achieving a lower test error with fewer\nboosting iterations. The error of each algorithm on the test set after each\nboosting iteration is shown on the left, the classification error on the test\nset of each tree is shown in the middle, and the boost weight of each tree is\nshown on the right. All trees have a weight of one in the SAMME.R algorithm and\ntherefore are not shown.\n\n.. [1] J. Zhu, H. Zou, S. Rosset, T. 
Hastie, \"Multi-class AdaBoost\", 2009.\n\n\"\"\"\n\n# Author: Noel Dawe <[email protected]>\n#\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_gaussian_quantiles\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nX, y = make_gaussian_quantiles(\n n_samples=13000, n_features=10, n_classes=3, random_state=1\n)\n\nn_split = 3000\n\nX_train, X_test = X[:n_split], X[n_split:]\ny_train, y_test = y[:n_split], y[n_split:]\n\nbdt_real = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=2), n_estimators=600, learning_rate=1\n)\n\nbdt_discrete = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=2),\n n_estimators=600,\n learning_rate=1.5,\n algorithm=\"SAMME\",\n)\n\nbdt_real.fit(X_train, y_train)\nbdt_discrete.fit(X_train, y_train)\n\nreal_test_errors = []\ndiscrete_test_errors = []\n\nfor real_test_predict, discrete_train_predict in zip(\n bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)\n):\n real_test_errors.append(1.0 - accuracy_score(real_test_predict, y_test))\n discrete_test_errors.append(1.0 - accuracy_score(discrete_train_predict, y_test))\n\nn_trees_discrete = len(bdt_discrete)\nn_trees_real = len(bdt_real)\n\n# Boosting might terminate early, but the following arrays are always\n# n_estimators long. We crop them to the actual number of trees here:\ndiscrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]\nreal_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]\ndiscrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]\n\nplt.figure(figsize=(15, 5))\n\nplt.subplot(131)\nplt.plot(range(1, n_trees_discrete + 1), discrete_test_errors, c=\"black\", label=\"SAMME\")\nplt.plot(\n range(1, n_trees_real + 1),\n real_test_errors,\n c=\"black\",\n linestyle=\"dashed\",\n label=\"SAMME.R\",\n)\nplt.legend()\nplt.ylim(0.18, 0.62)\nplt.ylabel(\"Test Error\")\nplt.xlabel(\"Number of Trees\")\n\nplt.subplot(132)\nplt.plot(\n range(1, n_trees_discrete + 1),\n discrete_estimator_errors,\n \"b\",\n label=\"SAMME\",\n alpha=0.5,\n)\nplt.plot(\n range(1, n_trees_real + 1), real_estimator_errors, \"r\", label=\"SAMME.R\", alpha=0.5\n)\nplt.legend()\nplt.ylabel(\"Error\")\nplt.xlabel(\"Number of Trees\")\nplt.ylim((0.2, max(real_estimator_errors.max(), discrete_estimator_errors.max()) * 1.2))\nplt.xlim((-20, len(bdt_discrete) + 20))\n\nplt.subplot(133)\nplt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights, \"b\", label=\"SAMME\")\nplt.legend()\nplt.ylabel(\"Weight\")\nplt.xlabel(\"Number of Trees\")\nplt.ylim((0, discrete_estimator_weights.max() * 1.2))\nplt.xlim((-20, n_trees_discrete + 20))\n\n# prevent overlapping y-axis labels\nplt.subplots_adjust(wspace=0.25)\nplt.show()\n" ]
[ [ "sklearn.utils.fixes.parse_version", "sklearn.get_config", "sklearn.utils.fixes.delayed", "sklearn.config_context", "sklearn.set_config" ], [ "numpy.dot", "numpy.eye", "scipy.linalg.inv", "numpy.sqrt", "numpy.maximum" ], [ "sklearn.datasets.fetch_openml", "sklearn.multioutput.ClassifierChain", "sklearn.multiclass.OneVsRestClassifier", "matplotlib.pyplot.subplots", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.tight_layout", "sklearn.metrics.jaccard_score", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.show" ], [ "sklearn.datasets.load_iris", "numpy.finfo", "numpy.tril_indices", "scipy.sparse.dok_matrix", "sklearn.metrics.pairwise_distances", "sklearn.metrics.cluster.silhouette_samples", "numpy.arange", "numpy.sqrt", "scipy.sparse.csr_matrix", "numpy.array", "numpy.zeros", "sklearn.metrics.cluster.davies_bouldin_score", "sklearn.metrics.cluster.calinski_harabasz_score", "numpy.isnan", "numpy.random.RandomState", "sklearn.utils._testing.assert_array_equal", "numpy.ones", "sklearn.metrics.cluster.silhouette_score", "scipy.sparse.lil_matrix", "numpy.unique" ], [ "scipy.sparse.issparse", "scipy.sparse.isspmatrix", "numpy.logical_or", "numpy.zeros", "scipy.sparse.csgraph.connected_components", "scipy.sparse.linalg.eigsh", "scipy.linalg.eigh", "scipy.sparse.eye", "numpy.where", "numpy.unique", "scipy.sparse.csgraph.laplacian" ], [ "numpy.testing.assert_allclose", "numpy.exp", "sklearn.datasets.make_regression", "sklearn.ensemble.HistGradientBoostingRegressor", "sklearn.preprocessing.MinMaxScaler", "sklearn.model_selection.cross_val_score", "numpy.max", "numpy.concatenate", "sklearn.ensemble._hist_gradient_boosting.grower.TreeGrower", "sklearn.base.is_regressor", "numpy.arange", "sklearn.ensemble._hist_gradient_boosting.loss.BinaryCrossEntropy", "sklearn.base.clone", "sklearn.datasets.make_classification", "numpy.array", "numpy.zeros", "sklearn.datasets.make_low_rank_matrix", "numpy.percentile", "sklearn.ensemble._hist_gradient_boosting.loss.LeastSquares", "sklearn.model_selection.train_test_split", "sklearn.utils._openmp_helpers._openmp_effective_n_threads", "sklearn.preprocessing.OneHotEncoder", "sklearn.ensemble._hist_gradient_boosting.binning._BinMapper", "sklearn.ensemble.HistGradientBoostingClassifier", "numpy.isnan", "numpy.asarray", "numpy.random.RandomState", "numpy.testing.assert_array_equal", "numpy.ones", "sklearn.dummy.DummyRegressor", "sklearn.preprocessing.KBinsDiscretizer", "numpy.all", "numpy.unique" ], [ "numpy.concatenate", "numpy.asarray", "numpy.log", "numpy.any", "numpy.sqrt", "numpy.size", "scipy.sparse.csr_matrix" ], [ "numpy.dot", "scipy.linalg.eigh", "scipy.linalg.svd", "numpy.finfo", "numpy.bincount", "numpy.concatenate", "numpy.empty", "numpy.log", "scipy.linalg.lstsq", "numpy.sqrt", "numpy.vstack", "numpy.array", "numpy.zeros", "scipy.special.expit", "numpy.linalg.svd", "numpy.argsort", "numpy.asarray", "numpy.sum", "numpy.add.at", "numpy.unique" ], [ "scipy.io.matlab.loadmat", "numpy.float32" ], [ "sklearn.datasets.make_gaussian_quantiles", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.ylabel", "sklearn.tree.DecisionTreeClassifier", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.subplot" ] ]
davidpneal/adventofcode
[ "f31b5132462b44aeadfdbcffe75f25215961a9ae" ]
[ "2018/day11/day11p2.py" ]
[ "#12/24/2018\n#Find the square which has the largest total power, the square can be anywhere from 1x1 to 300x300\n\n#The package numpy has some tools that can help with the multidimensional arrays and creating the summed area table\n#Note that numpy uses matrix indexing (i,j / row,col) vs cartesian indexing (x,y) --> if the matrix is printed out, it will be \"rotated\"\nimport numpy as np\n\n\n#Puzzle input\nserialNum = 9995\n\n\n\ndef calcCellPower(x,y,serialNum):\n\trackID = x + 10\n\tvalue = ((y * (x + 10)) + serialNum) * rackID\n\n\t#Keep the hundredths digit\n\ts = str(value) \n\thundreds = int(s[len(s)-3])\n\n\tpowerLevel = hundreds - 5\n\n\treturn powerLevel\n\n\n\ndef calcAreaPower(x,y,s):\n\t#Need to bound these params - if we are on the edge (ie, 0) will index outside the table!\n\t#This method will probably cause issues if the solution is near the edge of the grid, but works for the problem here\n\tif x == 0:\n\t\tx = 1\n\t\n\tif y == 0:\n\t\ty = 1\n\t\n\t#Must subtract 1 from the size (s) since the grid size is inclusive; ie, if the grid is 3x3, adding 3 would check a grid that is 4x4\n\tpower = sumTable[x+(s-1)][y+(s-1)] + sumTable[x-1][y-1] - sumTable[x-1][y+(s-1)] - sumTable[x+(s-1)][y-1]\n\treturn power\t\n\n\n\n#Create fuel grid: 300x300, use ints (defaults to float)\nfuelGrid = np.zeros(shape=(300,300),dtype=int)\n\n#Populate the values in the fuelGrid\nfor x in range(300):\n\tfor y in range(300):\n\t\tfuelGrid[x][y] = calcCellPower(x+1, y+1, serialNum)\n\n#Calculate summed area table\nsumTable = fuelGrid.cumsum(axis=0).cumsum(axis=1)\n\n\n#Find the square with the highest power rating, it is identified by the x,y coordinate in the upper left corner\nmax = 0\nfor s in range(299):\n\tfor x in range(300-s):\n\t\tfor y in range(300-s):\n\t\t\tsum = calcAreaPower(x,y,s)\n\t\t\tif sum > max:\n\t\t\t\tmax = sum\n\t\t\t\t#Add one to the answer since the matrix starts from 0\n\t\t\t\tloc = x+1,y+1,s\n\t\t\t\t##print(\"new max:\",max,loc)\n\t\t\nprint(\"Largest total power:\",loc)\nprint(\"Total power:\",max)\n\n\n\n''' MISC\n\nCorrect answer: 233,116,15 \n\n\n#print a partial grid\nfor x in range(10):\n\tprint(fuelGrid[x][:10])\n\t\n\t\n\t\n'''" ]
[ [ "numpy.zeros" ] ]
ddempsey/python_for_geoscientists
[ "428e2eaeb869f8478a3517d01a5fdff6de30e7d2" ]
[ "2_visualisation/mesh_plot.py" ]
[ "# import tools for 3D axes\nfrom matplotlib import pyplot as plt \nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm \nimport numpy as np \n\n# create a grid\nxg = np.linspace(0,1,31) # evenly spaced grid points\nyg = np.linspace(0,1,31)\nymin,ymax = [0.15,0.85] # create a smaller subgrid in the y-dir for coloring\ni1 = np.argmin(abs(yg-ymin))\ni2 = np.argmin(abs(yg-ymax))\nyg2 = yg[i1:i2+1] # subsample y coords\n[X,Y] = np.meshgrid(xg,yg) # create the two mesh grids\n[X2,Y2] = np.meshgrid(xg,yg2)\n\n# create a custom surface\n # parameters\nxm = np.mean(xg)*0.8\nym = np.mean(yg)*1.2\nsx = 0.02*3.\nsy = 0.04*3.\n # function defining the surface in terms of x, y and parameters\ndef r(X,Y): \n return (5-np.exp(-((X-xm)**2/sx+(Y-ym)**2/sy)))*(1-(X/4)**2)*(1+(Y/4)**2)\n\n# create a figure with a 3D projection\nfig = plt.figure(figsize=[15,8])\nax = fig.add_subplot(111, projection='3d')\n\n# plot the function as a wireframe over the large grid\nax.plot_wireframe(X, Y, r(X,Y), lw = 0.5, color = 'k')\n # shade part of the wireframe according to the function value\nCS = ax.plot_surface(X2, Y2, r(X2,Y2), rstride=1, cstride=1,cmap=cm.Oranges, lw = 0.5)\nplt.colorbar(CS, ax=ax)\n\n# display the interactive figure to the screen\nplt.show()" ]
[ [ "matplotlib.pyplot.colorbar", "numpy.exp", "numpy.mean", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.linspace", "numpy.meshgrid" ] ]
ing-bank/popmon
[ "729d61a4bfe45715d3970326d28b70b09d7fc13a" ]
[ "popmon/pipeline/report.py" ]
[ "# Copyright (c) 2021 ING Wholesale Banking Advanced Analytics\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nimport logging\n\nimport pandas as pd\nfrom histogrammar.dfinterface.make_histograms import (\n get_bin_specs,\n get_time_axes,\n make_histograms,\n)\n\nfrom ..base import Module\nfrom ..config import config\nfrom ..pipeline.report_pipelines import (\n ReportPipe,\n expanding_reference,\n external_reference,\n rolling_reference,\n self_reference,\n)\nfrom ..resources import templates_env\n\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s %(levelname)s [%(module)s]: %(message)s\"\n)\nlogger = logging.getLogger()\n\n_report_pipeline = {\n \"self\": self_reference,\n \"external\": external_reference,\n \"rolling\": rolling_reference,\n \"expanding\": expanding_reference,\n}\n\n\ndef stability_report(\n hists,\n reference_type=\"self\",\n reference=None,\n time_axis=\"\",\n window=10,\n shift=1,\n monitoring_rules=None,\n pull_rules=None,\n features=None,\n skip_empty_plots=True,\n last_n=0,\n plot_hist_n=2,\n report_filepath=None,\n extended_report=True,\n show_stats=config[\"limited_stats\"],\n **kwargs,\n):\n \"\"\"Create a data stability monitoring html report for given dict of input histograms.\n\n :param dict hists: input histograms to be profiled and monitored over time.\n :param reference_type: type or reference used for comparisons. Options [self, external, rolling, expanding].\n default is 'self'.\n :param reference: histograms used as reference. default is None\n :param str time_axis: name of datetime feature, used as time axis, eg 'date'. auto-guessed when not provided.\n :param int window: size of rolling window and/or trend detection. default is 10.\n :param int shift: shift of time-bins in rolling/expanding window. default is 1.\n :param dict monitoring_rules: monitoring rules to generate traffic light alerts.\n The default setting is:\n\n .. code-block:: python\n\n monitoring_rules = {\"*_pull\": [7, 4, -4, -7],\n \"*_zscore\": [7, 4, -4, -7],\n \"[!p]*_unknown_labels\": [0.5, 0.5, 0, 0]}\n\n Note that the (filename based) wildcards such as * apply to all statistic names matching that pattern.\n For example, ``\"*_pull\"`` applies for all features to all statistics ending on \"_pull\".\n You can also specify rules for specific features and/or statistics by leaving out wildcard and putting the\n feature name in front. E.g.\n\n .. 
code-block:: python\n\n monitoring_rules = {\"featureA:*_pull\": [5, 3, -3, -5],\n \"featureA:nan\": [4, 1, 0, 0],\n \"*_pull\": [7, 4, -4, -7],\n \"nan\": [8, 1, 0, 0]}\n\n In case of multiple rules could apply for a feature's statistic, the most specific one applies.\n So in case of the statistic \"nan\": \"featureA:nan\" is used for \"featureA\", and the other \"nan\" rule\n for all other features.\n :param dict pull_rules: red and yellow (possibly dynamic) boundaries shown in plots in the report.\n Default is:\n\n .. code-block:: python\n\n pull_rules = {\"*_pull\": [7, 4, -4, -7]}\n\n This means that the shown yellow boundaries are at -4, +4 standard deviations around the (reference) mean,\n and the shown red boundaries are at -7, +7 standard deviations around the (reference) mean.\n Note that the (filename based) wildcards such as * apply to all statistic names matching that pattern.\n (The same string logic applies as for monitoring_rules.)\n :param list features: histograms to pick up from the 'hists' dictionary (default is all keys)\n :param bool skip_empty_plots: if false, also show empty plots in report with only nans or zeroes (optional)\n :param int last_n: plot statistic data for last 'n' periods (optional)\n :param int plot_hist_n: plot histograms for last 'n' periods. default is 2 (optional)\n :param str report_filepath: the file path where to output the report (optional)\n :param bool extended_report: if True, show all the generated statistics in the report (optional)\n :param list show_stats: list of statistic name patterns to show in the report. If None, show all (optional)\n :param kwargs: residual keyword arguments passed on to report pipeline.\n :return: dict with results of reporting pipeline\n \"\"\"\n # perform basic input checks\n reference_types = list(_report_pipeline.keys())\n if reference_type not in reference_types:\n raise ValueError(f\"reference_type should be one of {str(reference_types)}.\")\n if not isinstance(hists, dict):\n raise TypeError(\"hists should be a dict of histogrammar histograms.\")\n if reference_type == \"external\" and not isinstance(reference, dict):\n raise TypeError(\"reference should be a dict of histogrammar histograms.\")\n if not isinstance(monitoring_rules, dict):\n monitoring_rules = {\n \"*_pull\": [7, 4, -4, -7],\n \"*_zscore\": [7, 4, -4, -7],\n \"[!p]*_unknown_labels\": [0.5, 0.5, 0, 0],\n }\n if not isinstance(pull_rules, dict):\n pull_rules = {\"*_pull\": [7, 4, -4, -7]}\n\n if (isinstance(time_axis, str) and len(time_axis) == 0) or (\n isinstance(time_axis, bool) and time_axis\n ):\n # auto guess the time_axis: find the most frequent first column name in the histograms list\n first_cols = [k.split(\":\")[0] for k in list(hists.keys())]\n time_axis = max(set(first_cols), key=first_cols.count)\n\n # if limited report is selected, check if stats list is provided, if not, get a default minimal list\n show_stats = show_stats if not extended_report else None\n\n # configuration and datastore for report pipeline\n cfg = {\n \"hists_key\": \"hists\",\n \"ref_hists_key\": \"ref_hists\",\n \"time_axis\": time_axis,\n \"window\": window,\n \"shift\": shift,\n \"monitoring_rules\": monitoring_rules,\n \"pull_rules\": pull_rules,\n \"features\": features,\n \"skip_empty_plots\": skip_empty_plots,\n \"last_n\": last_n,\n \"plot_hist_n\": plot_hist_n,\n \"report_filepath\": report_filepath,\n \"show_stats\": show_stats,\n **kwargs,\n }\n\n datastore = {\"hists\": hists}\n if reference_type == \"external\":\n datastore[\"ref_hists\"] = 
reference\n\n # execute reporting pipeline\n pipeline = _report_pipeline[reference_type](**cfg)\n stability_report = StabilityReport()\n stability_report.transform(pipeline.transform(datastore))\n return stability_report\n\n\ndef df_stability_report(\n df,\n time_axis,\n features=None,\n binning=\"auto\",\n bin_specs=None,\n time_width=None,\n time_offset=0,\n var_dtype=None,\n reference_type=\"self\",\n reference=None,\n window=10,\n shift=1,\n monitoring_rules=None,\n pull_rules=None,\n skip_empty_plots=True,\n last_n=0,\n plot_hist_n=2,\n report_filepath=None,\n extended_report=True,\n show_stats=config[\"limited_stats\"],\n **kwargs,\n):\n \"\"\"Create a data stability monitoring html report for given pandas or spark dataframe.\n\n :param df: input pandas/spark dataframe to be profiled and monitored over time.\n :param str time_axis: name of datetime feature, used as time axis, eg 'date'. if True, will be auto-guessed.\n If time_axis is set or found, and if no features provided, features becomes: ['date:x', 'date:y', 'date:z'] etc.\n :param list features: columns to pick up from input data. (default is all features).\n For multi-dimensional histograms, separate the column names with a ':'. Example features list is:\n\n .. code-block:: python\n\n features = ['x', 'date', 'date:x', 'date:y', 'date:x:y']\n\n :param str binning: default binning to revert to in case bin_specs not supplied. options are:\n \"unit\" or \"auto\", default is \"auto\". When using \"auto\", semi-clever binning is automatically done.\n :param dict bin_specs: dictionaries used for rebinning numeric or timestamp features.\n An example bin_specs dictionary is:\n\n .. code-block:: python\n\n bin_specs = {'x': {'bin_width': 1, 'bin_offset': 0},\n 'y': {'num': 10, 'low': 0.0, 'high': 2.0},\n 'x:y': [{}, {'num': 5, 'low': 0.0, 'high': 1.0}]}\n\n In the bin specs for x:y, x is not provided (here) and reverts to the 1-dim setting.\n The 'bin_width', 'bin_offset' notation makes an open-ended histogram (for that feature) with given bin width\n and offset. The notation 'num', 'low', 'high' gives a fixed range histogram from 'low' to 'high' with 'num'\n number of bins.\n :param time_width: bin width of time axis. str or number (ns). note: bin_specs takes precedence. (optional)\n\n .. code-block:: text\n\n Examples: '1w', 3600e9 (number of ns),\n anything understood by pd.Timedelta(time_width).value\n\n :param time_offset: bin offset of time axis. str or number (ns). note: bin_specs takes precedence. (optional)\n\n .. code-block:: text\n\n Examples: '1-1-2020', 0 (number of ns since 1-1-1970),\n anything parsed by pd.Timestamp(time_offset).value\n\n :param dict var_dtype: dictionary with specified datatype per feature. auto-guessed when not provided.\n :param reference_type: type or reference used for comparisons. Options [self, external, rolling, expanding].\n default is 'self'.\n :param reference: reference dataframe or histograms. default is None\n :param int window: size of rolling window and/or trend detection. default is 10.\n :param int shift: shift of time-bins in rolling/expanding window. default is 1.\n :param dict monitoring_rules: monitoring rules to generate traffic light alerts.\n The default setting is:\n\n .. 
code-block:: python\n\n monitoring_rules = {\"*_pull\": [7, 4, -4, -7],\n \"*_zscore\": [7, 4, -4, -7],\n \"[!p]*_unknown_labels\": [0.5, 0.5, 0, 0]}\n\n Note that the (filename based) wildcards such as * apply to all statistic names matching that pattern.\n For example, ``\"*_pull\"`` applies for all features to all statistics ending on \"_pull\".\n You can also specify rules for specific features and/or statistics by leaving out wildcard and putting the\n feature name in front. E.g.\n\n .. code-block:: python\n\n monitoring_rules = {\"featureA:*_pull\": [5, 3, -3, -5],\n \"featureA:nan\": [4, 1, 0, 0],\n \"*_pull\": [7, 4, -4, -7],\n \"nan\": [8, 1, 0, 0]}\n\n In case of multiple rules could apply for a feature's statistic, the most specific one applies.\n So in case of the statistic \"nan\": \"featureA:nan\" is used for \"featureA\", and the other \"nan\" rule\n for all other features.\n :param dict pull_rules: red and yellow (possibly dynamic) boundaries shown in plots in the report.\n Default is:\n\n .. code-block:: python\n\n pull_rules = {\"*_pull\": [7, 4, -4, -7]}\n\n This means that the shown yellow boundaries are at -4, +4 standard deviations around the (reference) mean,\n and the shown red boundaries are at -7, +7 standard deviations around the (reference) mean.\n Note that the (filename based) wildcards such as * apply to all statistic names matching that pattern.\n (The same string logic applies as for monitoring_rules.)\n :param bool skip_empty_plots: if false, also show empty plots in report with only nans or zeroes (optional)\n :param int last_n: plot statistic data for last 'n' periods (optional)\n :param int plot_hist_n: plot histograms for last 'n' periods. default is 2 (optional)\n :param str report_filepath: the file path where to output the report (optional)\n :param bool extended_report: if True, show all the generated statistics in the report (optional)\n :param list show_stats: list of statistic name patterns to show in the report. If None, show all (optional)\n :param kwargs: residual keyword arguments, passed on to stability_report()\n :return: dict with results of reporting pipeline\n \"\"\"\n # basic checks on presence of time_axis\n if not (isinstance(time_axis, str) and len(time_axis) > 0) and not (\n isinstance(time_axis, bool) and time_axis\n ):\n raise ValueError(\"time_axis needs to be a filled string or set to True\")\n if isinstance(time_axis, str) and time_axis not in df.columns:\n raise ValueError(f'time_axis \"{time_axis}\" not found in columns of dataframe.')\n if reference is not None and not isinstance(reference, dict):\n if isinstance(time_axis, str) and time_axis not in reference.columns:\n raise ValueError(\n f'time_axis \"{time_axis}\" not found in columns of reference dataframe.'\n )\n if isinstance(time_axis, bool):\n time_axes = get_time_axes(df)\n num = len(time_axes)\n if num == 1:\n time_axis = time_axes[0]\n logger.info(f'Time-axis automatically set to \"{time_axis}\"')\n elif num == 0:\n raise ValueError(\n \"No obvious time-axes found. Cannot generate stability report.\"\n )\n else:\n raise ValueError(\n f\"Found {num} time-axes: {time_axes}. Set *one* time_axis manually!\"\n )\n if features is not None:\n # by now time_axis is defined. 
ensure that all histograms start with it.\n if not isinstance(features, list):\n raise TypeError(\n \"features should be list of columns (or combos) to pick up from input data.\"\n )\n features = [\n c if c.startswith(time_axis) else f\"{time_axis}:{c}\" for c in features\n ]\n\n # interpret time_width and time_offset\n if isinstance(time_width, (str, int, float)) and isinstance(\n time_offset, (str, int, float)\n ):\n if bin_specs is None:\n bin_specs = {}\n elif not isinstance(bin_specs, dict):\n raise ValueError(\"bin_specs object is not a dictionary\")\n\n if time_axis in bin_specs:\n raise ValueError(\n f'time-axis \"{time_axis}\" already found in binning specifications.'\n )\n # convert time width and offset to nanoseconds\n time_specs = {\n \"bin_width\": float(pd.Timedelta(time_width).value),\n \"bin_offset\": float(pd.Timestamp(time_offset).value),\n }\n bin_specs[time_axis] = time_specs\n\n reference_hists = None\n if reference is not None:\n reference_type = \"external\"\n if isinstance(reference, dict):\n # 1. reference is dict of histograms\n # extract features and bin_specs from reference histograms\n reference_hists = reference\n features = list(reference_hists.keys())\n bin_specs = get_bin_specs(reference_hists)\n else:\n # 2. reference is pandas or spark dataframe\n # generate histograms and return updated features, bin_specs, time_axis, etc.\n (\n reference_hists,\n features,\n bin_specs,\n time_axis,\n var_dtype,\n ) = make_histograms(\n reference,\n features,\n binning,\n bin_specs,\n time_axis,\n var_dtype,\n ret_specs=True,\n )\n\n # use the same features, bin_specs, time_axis, etc as for reference hists\n hists = make_histograms(\n df,\n features=features,\n binning=binning,\n bin_specs=bin_specs,\n time_axis=time_axis,\n var_dtype=var_dtype,\n )\n\n # generate data stability report\n return stability_report(\n hists,\n reference_type,\n reference_hists,\n time_axis,\n window,\n shift,\n monitoring_rules,\n pull_rules,\n features,\n skip_empty_plots,\n last_n,\n plot_hist_n,\n report_filepath,\n extended_report,\n show_stats,\n **kwargs,\n )\n\n\nclass StabilityReport(Module):\n \"\"\"Representation layer of the report.\n\n Stability report module wraps the representation functionality of the report\n after running the pipeline and generating the report. Report can be represented\n as a HTML string, HTML file or Jupyter notebook's cell output.\n \"\"\"\n\n def __init__(self, read_key=\"html_report\"):\n \"\"\"Initialize an instance of StabilityReport.\n\n :param str read_key: key of HTML report data to read from data store. default is html_report.\n \"\"\"\n super().__init__()\n self.read_key = read_key\n self.html_report = \"\"\n self.datastore = {}\n\n def transform(self, datastore):\n self.datastore = datastore\n self.html_report = self.get_datastore_object(datastore, self.read_key, str)\n\n def _repr_html_(self):\n \"\"\"HTML representation of the class (report) embedded in an iframe.\n\n :return HTML: HTML report in an iframe\n \"\"\"\n from IPython.core.display import display\n\n return display(self.to_notebook_iframe())\n\n def __repr__(self):\n \"\"\"Override so that Jupyter Notebook does not print the object.\"\"\"\n return \"\"\n\n def to_html(self, escape=False):\n \"\"\"HTML code representation of the report (represented as a string).\n\n :param bool escape: escape characters which could conflict with other HTML code. 
default: False\n :return str: HTML code of the report\n \"\"\"\n import html\n\n return html.escape(self.html_report) if escape else self.html_report\n\n def to_file(self, filename):\n \"\"\"Store HTML report in the local file system.\n\n :param str filename: filename for the HTML report\n \"\"\"\n with open(filename, \"w+\") as file:\n file.write(self.to_html())\n\n def to_notebook_iframe(self, width=\"100%\", height=\"100%\"):\n \"\"\"HTML representation of the class (report) embedded in an iframe.\n\n :param str width: width of the frame to be shown\n :param str height: height of the frame to be shown\n :return HTML: HTML report in an iframe\n \"\"\"\n from IPython.core.display import HTML\n\n # get iframe's snippet code, insert report's HTML code and display it as HTML\n return HTML(\n templates_env(\n filename=\"notebook_iframe.html\",\n src=self.to_html(escape=True),\n width=width,\n height=height,\n )\n )\n\n def regenerate(\n self,\n last_n=0,\n skip_first_n=0,\n skip_last_n=0,\n plot_hist_n=2,\n skip_empty_plots=True,\n report_filepath=None,\n store_key=\"html_report\",\n sections_key=\"report_sections\",\n extended_report=True,\n show_stats=config[\"limited_stats\"],\n ):\n \"\"\"Regenerate HTML report with different plot settings\n\n :param int last_n: plot statistic data for last 'n' periods (optional)\n :param int skip_first_n: in plot skip first 'n' periods. last_n takes precedence (optional)\n :param int skip_last_n: in plot skip last 'n' periods. last_n takes precedence (optional)\n :param int plot_hist_n: plot histograms for last 'n' periods. default is 2 (optional)\n :param bool skip_empty_plots: if false, also show empty plots in report with only nans or zeroes (optional)\n :param str report_filepath: the file path where to output the report (optional)\n :param str sections_key: key to store sections data in the datastore. default is 'report_sections'.\n :param str store_key: key to store the HTML report data in the datastore. default is 'html_report'\n :param bool extended_report: if True, show all the generated statistics in the report (optional)\n :param list show_stats: list of statistic name patterns to show in the report. If None, show all (optional)\n :return HTML: HTML report in an iframe\n \"\"\"\n # basic checks\n if not self.datastore:\n self.logger.warning(\"Empty datastore, cannot regenerate report.\")\n return None\n\n # start from clean slate\n if sections_key in self.datastore:\n del self.datastore[sections_key]\n if store_key in self.datastore:\n del self.datastore[store_key]\n\n # if limited report is selected, check if stats list is provided, if not, get a default minimal list\n show_stats = show_stats if not extended_report else None\n\n pipeline = ReportPipe(\n sections_key=sections_key,\n last_n=last_n,\n skip_first_n=skip_first_n,\n skip_last_n=skip_last_n,\n skip_empty_plots=skip_empty_plots,\n plot_hist_n=plot_hist_n,\n report_filepath=report_filepath,\n show_stats=show_stats,\n )\n stability_report = StabilityReport()\n stability_report.transform(pipeline.transform(self.datastore))\n return stability_report\n" ]
[ [ "pandas.Timestamp", "pandas.Timedelta" ] ]
HanChangHun/dsn_fewshot
[ "dbe8d637bce1cb17bfb7c7fd7784bcdebb79085c" ]
[ "Conv4/algorithm/subspace_projection.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass Subspace_Projection(nn.Module):\n def __init__(self, num_dim=5):\n super().__init__()\n self.num_dim = num_dim\n\n def create_subspace(self, supportset_features, class_size, sample_size):\n all_hyper_planes = []\n means = []\n for ii in range(class_size):\n num_sample = sample_size\n all_support_within_class_t = supportset_features[ii]\n meann = torch.mean(all_support_within_class_t, dim=0)\n means.append(meann)\n all_support_within_class_t = all_support_within_class_t - meann.unsqueeze(0).repeat(num_sample, 1)\n all_support_within_class = torch.transpose(all_support_within_class_t, 0, 1)\n uu, s, v = torch.svd(all_support_within_class.double(), some=False)\n uu = uu.float()\n all_hyper_planes.append(uu[:, :self.num_dim])\n\n all_hyper_planes = torch.stack(all_hyper_planes, dim=0)\n means = torch.stack(means)\n\n if len(all_hyper_planes.size()) < 3:\n all_hyper_planes = all_hyper_planes.unsqueeze(-1)\n\n return all_hyper_planes, means\n\n\n def projection_metric(self, target_features, hyperplanes, mu):\n eps = 1e-12\n batch_size = target_features.shape[0]\n class_size = hyperplanes.shape[0]\n\n similarities = []\n\n discriminative_loss = 0.0\n\n for j in range(class_size):\n h_plane_j = hyperplanes[j].unsqueeze(0).repeat(batch_size, 1, 1)\n target_features_expanded = (target_features - mu[j].expand_as(target_features)).unsqueeze(-1)\n projected_query_j = torch.bmm(h_plane_j, torch.bmm(torch.transpose(h_plane_j, 1, 2), target_features_expanded))\n projected_query_j = torch.squeeze(projected_query_j) + mu[j].unsqueeze(0).repeat(batch_size, 1)\n projected_query_dist_inter = target_features - projected_query_j\n\n #Training per epoch is slower but less epochs in total\n query_loss = -torch.sqrt(torch.sum(projected_query_dist_inter * projected_query_dist_inter, dim=-1) + eps) # norm ||.||\n\n #Training per epoch is faster but more epochs in total\n #query_loss = -torch.sum(projected_query_dist_inter * projected_query_dist_inter, dim=-1) # Squared norm ||.||^2\n\n similarities.append(query_loss)\n\n for k in range(class_size):\n if j != k:\n temp_loss = torch.mm(torch.transpose(hyperplanes[j], 0, 1), hyperplanes[k]) ## discriminative subspaces (Conv4 only, ResNet12 is computationally expensive)\n discriminative_loss = discriminative_loss + torch.sum(temp_loss*temp_loss)\n\n similarities = torch.stack(similarities, dim=1)\n\n return similarities, discriminative_loss\n" ]
[ [ "torch.stack", "torch.squeeze", "torch.transpose", "torch.mean", "torch.sum" ] ]
rosefiero/AI-102-AIEngineer
[ "6d2ffa3b578e600fee908fa93107f73f3d74ece3" ]
[ "20-ocr/Python/read-text/read-text.py" ]
[ "from dotenv import load_dotenv\nimport os\nimport time\nfrom PIL import Image, ImageDraw\nfrom matplotlib import pyplot as plt\n\n# Import namespaces\n# import namespaces\nfrom azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import OperationStatusCodes\nfrom msrest.authentication import CognitiveServicesCredentials\n\n\ndef main():\n\n global cv_client\n\n try:\n # Get Configuration Settings\n load_dotenv()\n cog_endpoint = os.getenv('COG_SERVICE_ENDPOINT')\n cog_key = os.getenv('COG_SERVICE_KEY')\n\n # Authenticate Computer Vision client\n credential = CognitiveServicesCredentials(cog_key) \n cv_client = ComputerVisionClient(cog_endpoint, credential)\n \n # Menu for text reading functions\n print('1: Use OCR API\\n2: Use Read API\\n3: Read handwriting\\nAny other key to quit')\n command = input('Enter a number:')\n if command == '1':\n image_file = os.path.join('images','Lincoln.jpg')\n GetTextOcr(image_file)\n elif command =='2':\n image_file = os.path.join('images','Rome.pdf')\n GetTextRead(image_file)\n elif command =='3':\n image_file = os.path.join('images','Note.jpg')\n GetTextRead(image_file)\n \n\n except Exception as ex:\n print(ex)\n\ndef GetTextOcr(image_file):\n print('Reading text in {}\\n'.format(image_file))\n # Use OCR API to read text in image\n with open(image_file, mode=\"rb\") as image_data:\n ocr_results = cv_client.recognize_printed_text_in_stream(image_data)\n\n # Prepare image for drawing\n fig = plt.figure(figsize=(7, 7))\n img = Image.open(image_file)\n draw = ImageDraw.Draw(img)\n\n # Process the text line by line\n for region in ocr_results.regions:\n for line in region.lines:\n\n # Show the position of the line of text\n l,t,w,h = list(map(int, line.bounding_box.split(',')))\n draw.rectangle(((l,t), (l+w, t+h)), outline='magenta', width=5)\n\n # Read the words in the line of text\n line_text = ''\n for word in line.words:\n line_text += word.text + ' '\n print(line_text.rstrip())\n\n # Save the image with the text locations highlighted\n plt.axis('off')\n plt.imshow(img)\n outputfile = 'ocr_results.jpg'\n fig.savefig(outputfile)\n print('Results saved in', outputfile)\n\n\n\ndef GetTextRead(image_file):\n print('Reading text in {}\\n'.format(image_file))\n # Use Read API to read text in image\n with open(image_file, mode=\"rb\") as image_data:\n read_op = cv_client.read_in_stream(image_data, raw=True)\n\n # Get the async operation ID so we can check for the results\n operation_location = read_op.headers[\"Operation-Location\"]\n operation_id = operation_location.split(\"/\")[-1]\n\n # Wait for the asynchronous operation to complete\n while True:\n read_results = cv_client.get_read_result(operation_id)\n if read_results.status not in [OperationStatusCodes.running, OperationStatusCodes.not_started]:\n break\n time.sleep(1)\n\n # If the operation was successfuly, process the text line by line\n if read_results.status == OperationStatusCodes.succeeded:\n for page in read_results.analyze_result.read_results:\n for line in page.lines:\n print(line.text)\n\n\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
goncaloperes/bokeh
[ "b857d2d17d7c19779bb0a7be2601d8238fb1d5e9", "894731860c53b7c9ddd0057dee85cf064278dc0e", "894731860c53b7c9ddd0057dee85cf064278dc0e", "894731860c53b7c9ddd0057dee85cf064278dc0e", "b857d2d17d7c19779bb0a7be2601d8238fb1d5e9", "b857d2d17d7c19779bb0a7be2601d8238fb1d5e9" ]
[ "tests/unit/bokeh/core/property/test_primitive.py", "examples/plotting/file/multi_legend.py", "examples/plotting/file/toolbar_autohide.py", "examples/plotting/file/candlestick.py", "examples/app/movies/main.py", "tests/unit/bokeh/util/test_hex.py" ]
[ "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom _util_property import _TestHasProps, _TestModel\nfrom bokeh._testing.util.api import verify_all\n\n# Module under test\nimport bokeh.core.property.primitive as bcpp # isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nALL = (\n 'Bool',\n 'Complex',\n 'Int',\n 'Float',\n 'Null',\n 'String',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n\nclass Test_Bool:\n def test_valid(self) -> None:\n prop = bcpp.Bool()\n\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n assert prop.is_valid(np.bool8(False))\n assert prop.is_valid(np.bool8(True))\n\n def test_invalid(self) -> None:\n prop = bcpp.Bool()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(0)\n assert not prop.is_valid(1)\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.int8(0))\n assert not prop.is_valid(np.int8(1))\n assert not prop.is_valid(np.int16(0))\n assert not prop.is_valid(np.int16(1))\n assert not prop.is_valid(np.int32(0))\n assert not prop.is_valid(np.int32(1))\n assert not prop.is_valid(np.int64(0))\n assert not prop.is_valid(np.int64(1))\n assert not prop.is_valid(np.uint8(0))\n assert not prop.is_valid(np.uint8(1))\n assert not prop.is_valid(np.uint16(0))\n assert not prop.is_valid(np.uint16(1))\n assert not prop.is_valid(np.uint32(0))\n assert not prop.is_valid(np.uint32(1))\n assert not prop.is_valid(np.uint64(0))\n assert not prop.is_valid(np.uint64(1))\n assert not prop.is_valid(np.float16(0))\n assert not prop.is_valid(np.float16(1))\n assert not prop.is_valid(np.float32(0))\n assert not prop.is_valid(np.float32(1))\n assert not prop.is_valid(np.float64(0))\n assert not prop.is_valid(np.float64(1))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self) -> None:\n prop = bcpp.Bool()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.Bool()\n assert str(prop) == \"Bool\"\n\n\nclass Test_Complex:\n def test_valid(self) -> None:\n prop = bcpp.Complex()\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n assert prop.is_valid(0.0)\n assert prop.is_valid(1.0)\n assert prop.is_valid(1.0+1.0j)\n\n assert 
prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n assert prop.is_valid(np.float16(0))\n assert prop.is_valid(np.float16(1))\n assert prop.is_valid(np.float32(0))\n assert prop.is_valid(np.float32(1))\n assert prop.is_valid(np.float64(0))\n assert prop.is_valid(np.float64(1))\n assert prop.is_valid(np.complex64(1.0+1.0j))\n assert prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert prop.is_valid(np.complex256(1.0+1.0j))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self) -> None:\n prop = bcpp.Complex()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n\n def test_has_ref(self) -> None:\n prop = bcpp.Complex()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.Complex()\n assert str(prop) == \"Complex\"\n\n\nclass Test_Float:\n def test_valid(self) -> None:\n prop = bcpp.Float()\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n assert prop.is_valid(0.0)\n assert prop.is_valid(1.0)\n\n assert prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n assert prop.is_valid(np.float16(0))\n assert prop.is_valid(np.float16(1))\n assert prop.is_valid(np.float32(0))\n assert prop.is_valid(np.float32(1))\n assert prop.is_valid(np.float64(0))\n assert prop.is_valid(np.float64(1))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self) -> None:\n prop = bcpp.Float()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self) -> None:\n prop = bcpp.Float()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.Float()\n assert str(prop) == \"Float\"\n\n\nclass Test_Int:\n def test_valid(self) 
-> None:\n prop = bcpp.Int()\n\n assert prop.is_valid(0)\n assert prop.is_valid(1)\n\n assert prop.is_valid(np.int8(0))\n assert prop.is_valid(np.int8(1))\n assert prop.is_valid(np.int16(0))\n assert prop.is_valid(np.int16(1))\n assert prop.is_valid(np.int32(0))\n assert prop.is_valid(np.int32(1))\n assert prop.is_valid(np.int64(0))\n assert prop.is_valid(np.int64(1))\n assert prop.is_valid(np.uint8(0))\n assert prop.is_valid(np.uint8(1))\n assert prop.is_valid(np.uint16(0))\n assert prop.is_valid(np.uint16(1))\n assert prop.is_valid(np.uint32(0))\n assert prop.is_valid(np.uint32(1))\n assert prop.is_valid(np.uint64(0))\n assert prop.is_valid(np.uint64(1))\n\n # TODO (bev) should fail\n assert prop.is_valid(False)\n assert prop.is_valid(True)\n\n def test_invalid(self) -> None:\n prop = bcpp.Int()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n assert not prop.is_valid(\"\")\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n assert not prop.is_valid(np.bool8(False))\n assert not prop.is_valid(np.bool8(True))\n assert not prop.is_valid(np.float16(0))\n assert not prop.is_valid(np.float16(1))\n assert not prop.is_valid(np.float32(0))\n assert not prop.is_valid(np.float32(1))\n assert not prop.is_valid(np.float64(0))\n assert not prop.is_valid(np.float64(1))\n assert not prop.is_valid(np.complex64(1.0+1.0j))\n assert not prop.is_valid(np.complex128(1.0+1.0j))\n if hasattr(np, \"complex256\"):\n assert not prop.is_valid(np.complex256(1.0+1.0j))\n\n def test_has_ref(self) -> None:\n prop = bcpp.Int()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.Int()\n assert str(prop) == \"Int\"\n\n\nclass Test_String:\n def test_valid(self) -> None:\n prop = bcpp.String()\n\n assert prop.is_valid(\"\")\n assert prop.is_valid(\"6\")\n\n def test_invalid(self) -> None:\n prop = bcpp.String()\n\n assert not prop.is_valid(None)\n assert not prop.is_valid(False)\n assert not prop.is_valid(True)\n assert not prop.is_valid(0)\n assert not prop.is_valid(1)\n assert not prop.is_valid(0.0)\n assert not prop.is_valid(1.0)\n assert not prop.is_valid(1.0+1.0j)\n\n assert not prop.is_valid(())\n assert not prop.is_valid([])\n assert not prop.is_valid({})\n assert not prop.is_valid(_TestHasProps())\n assert not prop.is_valid(_TestModel())\n\n def test_has_ref(self) -> None:\n prop = bcpp.String()\n assert not prop.has_ref\n\n def test_str(self) -> None:\n prop = bcpp.String()\n assert str(prop) == \"String\"\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nTest___all__ = verify_all(bcpp, ALL)\n", "import numpy as np\n\nfrom bokeh.models import Legend, LegendItem\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.sampledata.stocks import AAPL, MSFT\n\n\ndef datetime(x):\n return np.array(x, dtype=np.datetime64)\n\np = figure(background_fill_color=\"#fafafa\", x_axis_type=\"datetime\",\n plot_width=800, 
plot_height=350)\n\nr = p.multi_line([datetime(AAPL['date']), datetime(MSFT['date'])],\n [AAPL['adj_close'], MSFT['adj_close']],\n color=[\"navy\", \"crimson\"], line_width=2, alpha=0.6)\n\nlegend = Legend(items=[\n LegendItem(label=\"AAPL\", renderers=[r], index=0),\n LegendItem(label=\"MSFT\", renderers=[r], index=1),\n], location=\"top_left\")\np.add_layout(legend)\n\noutput_file(\"multi_legend.html\")\n\nshow(p)\n", "import numpy as np\n\nfrom bokeh.layouts import row\nfrom bokeh.plotting import figure, output_file, show\n\nN = 1000\nx = np.random.random(size=N) * 100\ny = np.random.random(size=N) * 100\nradii = np.random.random(size=N) * 1.5\ncolors = [\"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)]\n\ndef make_plot(autohide=None):\n p = figure(width=300, height=300, title='Autohiding toolbar' if autohide else 'Not autohiding toolbar')\n p.scatter(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)\n p.toolbar.autohide = autohide\n return p\n\noutput_file(\"toolbar_autohide.html\", title=\"toolbar_autohide example\")\n\nshow(row(make_plot(True), make_plot(False)))\n", "from math import pi\n\nimport pandas as pd\n\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.sampledata.stocks import MSFT\n\ndf = pd.DataFrame(MSFT)[:50]\ndf[\"date\"] = pd.to_datetime(df[\"date\"])\n\ninc = df.close > df.open\ndec = df.open > df.close\nw = 12*60*60*1000 # half day in ms\n\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n\np = figure(x_axis_type=\"datetime\", tools=TOOLS, plot_width=1000, title = \"MSFT Candlestick\")\np.xaxis.major_label_orientation = pi/4\np.grid.grid_line_alpha=0.3\n\np.segment(df.date, df.high, df.date, df.low, color=\"black\")\np.vbar(df.date[inc], w, df.open[inc], df.close[inc], fill_color=\"#D5E1DD\", line_color=\"black\")\np.vbar(df.date[dec], w, df.open[dec], df.close[dec], fill_color=\"#F2583E\", line_color=\"black\")\n\noutput_file(\"candlestick.html\", title=\"candlestick.py example\")\n\nshow(p) # open a browser\n", "import sqlite3 as sql\nfrom os.path import dirname, join\n\nimport numpy as np\nimport pandas.io.sql as psql\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import column, row\nfrom bokeh.models import ColumnDataSource, Div, Select, Slider, TextInput\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.movies_data import movie_path\n\nconn = sql.connect(movie_path)\nquery = open(join(dirname(__file__), 'query.sql')).read()\nmovies = psql.read_sql(query, conn)\n\nmovies[\"color\"] = np.where(movies[\"Oscars\"] > 0, \"orange\", \"grey\")\nmovies[\"alpha\"] = np.where(movies[\"Oscars\"] > 0, 0.9, 0.25)\nmovies.fillna(0, inplace=True) # just replace missing values with zero\nmovies[\"revenue\"] = movies.BoxOffice.apply(lambda x: '{:,d}'.format(int(x)))\n\nwith open(join(dirname(__file__), \"razzies-clean.csv\")) as f:\n razzies = f.read().splitlines()\nmovies.loc[movies.imdbID.isin(razzies), \"color\"] = \"purple\"\nmovies.loc[movies.imdbID.isin(razzies), \"alpha\"] = 0.9\n\naxis_map = {\n \"Tomato Meter\": \"Meter\",\n \"Numeric Rating\": \"numericRating\",\n \"Number of Reviews\": \"Reviews\",\n \"Box Office (dollars)\": \"BoxOffice\",\n \"Length (minutes)\": \"Runtime\",\n \"Year\": \"Year\",\n}\n\ndesc = Div(text=open(join(dirname(__file__), \"description.html\")).read(), sizing_mode=\"stretch_width\")\n\n# Create Input controls\nreviews = Slider(title=\"Minimum number of reviews\", value=80, start=10, end=300, step=10)\nmin_year = Slider(title=\"Year released\", start=1940, end=2014, 
value=1970, step=1)\nmax_year = Slider(title=\"End Year released\", start=1940, end=2014, value=2014, step=1)\noscars = Slider(title=\"Minimum number of Oscar wins\", start=0, end=4, value=0, step=1)\nboxoffice = Slider(title=\"Dollars at Box Office (millions)\", start=0, end=800, value=0, step=1)\ngenre = Select(title=\"Genre\", value=\"All\",\n options=open(join(dirname(__file__), 'genres.txt')).read().split())\ndirector = TextInput(title=\"Director name contains\")\ncast = TextInput(title=\"Cast names contains\")\nx_axis = Select(title=\"X Axis\", options=sorted(axis_map.keys()), value=\"Tomato Meter\")\ny_axis = Select(title=\"Y Axis\", options=sorted(axis_map.keys()), value=\"Number of Reviews\")\n\n# Create Column Data Source that will be used by the plot\nsource = ColumnDataSource(data=dict(x=[], y=[], color=[], title=[], year=[], revenue=[], alpha=[]))\n\nTOOLTIPS=[\n (\"Title\", \"@title\"),\n (\"Year\", \"@year\"),\n (\"$\", \"@revenue\")\n]\n\np = figure(plot_height=600, plot_width=700, title=\"\", toolbar_location=None, tooltips=TOOLTIPS, sizing_mode=\"scale_both\")\np.circle(x=\"x\", y=\"y\", source=source, size=7, color=\"color\", line_color=None, fill_alpha=\"alpha\")\n\n\ndef select_movies():\n genre_val = genre.value\n director_val = director.value.strip()\n cast_val = cast.value.strip()\n selected = movies[\n (movies.Reviews >= reviews.value) &\n (movies.BoxOffice >= (boxoffice.value * 1e6)) &\n (movies.Year >= min_year.value) &\n (movies.Year <= max_year.value) &\n (movies.Oscars >= oscars.value)\n ]\n if (genre_val != \"All\"):\n selected = selected[selected.Genre.str.contains(genre_val)==True]\n if (director_val != \"\"):\n selected = selected[selected.Director.str.contains(director_val)==True]\n if (cast_val != \"\"):\n selected = selected[selected.Cast.str.contains(cast_val)==True]\n return selected\n\n\ndef update():\n df = select_movies()\n x_name = axis_map[x_axis.value]\n y_name = axis_map[y_axis.value]\n\n p.xaxis.axis_label = x_axis.value\n p.yaxis.axis_label = y_axis.value\n p.title.text = \"%d movies selected\" % len(df)\n source.data = dict(\n x=df[x_name],\n y=df[y_name],\n color=df[\"color\"],\n title=df[\"Title\"],\n year=df[\"Year\"],\n revenue=df[\"revenue\"],\n alpha=df[\"alpha\"],\n )\n\ncontrols = [reviews, boxoffice, genre, min_year, max_year, oscars, director, cast, x_axis, y_axis]\nfor control in controls:\n control.on_change('value', lambda attr, old, new: update())\n\ninputs = column(*controls, width=320)\n\nl = column(desc, row(inputs, p), sizing_mode=\"scale_both\")\n\nupdate() # initial load of the data\n\ncurdoc().add_root(l)\ncurdoc().title = \"Movies\"\n", "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nimport numpy as np\n\n# Module under test\nimport bokeh.util.hex as buh # 
isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nnp.random.seed(0)\nn = 500\nx = 2 + np.random.standard_normal(n)\ny = 2 + np.random.standard_normal(n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n\nclass Test_axial_to_cartesian:\n def test_default_aspect_pointytop(self) -> None:\n q = np.array([0, 0, 0, 1, -1, 1, -1])\n r = np.array([0, 1, -1, 0, 1, -1, 0])\n\n x, y = buh.axial_to_cartesian(q, r, 1, \"pointytop\")\n\n sq3 = np.sqrt(3)\n assert list(x) == [0, sq3/2, -sq3/2, sq3, -sq3/2, sq3/2, -sq3]\n assert list(y) == [-0.0, -1.5, 1.5, -0.0, -1.5, 1.5, -0.0]\n\n\n def test_default_aspect_flattop(self) -> None:\n q = np.array([0, 0, 0, 1, -1, 1, -1])\n r = np.array([0, 1, -1, 0, 1, -1, 0])\n\n x, y = buh.axial_to_cartesian(q, r, 1, \"flattop\")\n\n sq3 = np.sqrt(3)\n assert list(x) == [0.0, 0.0, 0.0, 1.5, -1.5, 1.5, -1.5]\n assert list(y) == [0, -sq3, sq3, -sq3/2, -sq3/2, sq3/2, sq3/2]\n\n\nclass Test_cartesian_to_axial:\n def test_default_aspect_pointytop(self) -> None:\n x = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])\n y = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])\n\n q, r = buh.cartesian_to_axial(x, y, 1, \"pointytop\")\n\n assert list(zip(q, r)) == [\n (0,0), (-1, 0), (1,0), (0,-1), (-1, 1), (1, -1), (0,1)\n ]\n\n def test_default_aspect_flattop(self) -> None:\n x = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])\n y = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])\n\n q, r = buh.cartesian_to_axial(x, y, 1, \"flattop\")\n\n assert list(zip(q, r)) == [\n (0,0), (0,1), (0,-1), (1, 0), (-1, 1), (1, -1), (-1,0)\n ]\n\n\nclass Test_hexbin:\n # hexbin requires pandas\n\n def test_gaussian_pointytop(self, pd) -> None:\n bins = buh.hexbin(x, y, 2)\n assert list(bins.q) == [0,0,1,1,1,2,2]\n assert list(bins.r) == [-1,0,-2,-1,0,-2,-1]\n assert list(bins.counts) == [9,54,1,313,98,3,22]\n\n assert bins.equals(buh.hexbin(x, y, 2, \"pointytop\"))\n\n def test_gaussian_flattop(self, pd) -> None:\n bins = buh.hexbin(x, y, 2, \"flattop\")\n assert list(bins.q) == [0, 0, 1, 1, 1, 2]\n assert list(bins.r) == [-1, 0, -2, -1, 0, -2]\n assert list(bins.counts) == [95, 57, 14, 324, 8, 2]\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n" ]
[ [ "numpy.int8", "numpy.uint8", "numpy.uint32", "numpy.bool8", "numpy.float16", "numpy.complex256", "numpy.uint16", "numpy.complex128", "numpy.float64", "numpy.complex64", "numpy.float32", "numpy.uint64", "numpy.int64", "numpy.int32", "numpy.int16" ], [ "numpy.array" ], [ "numpy.random.random" ], [ "pandas.to_datetime", "pandas.DataFrame" ], [ "numpy.where", "pandas.io.sql.read_sql" ], [ "numpy.random.seed", "numpy.array", "numpy.random.standard_normal", "numpy.sqrt" ] ]
chinvib66/Niffler
[ "6fcf46c505249ac116b16ed2efda92685ba153c1" ]
[ "modules/png-extraction/ImageExtractor.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nimport glob \nfrom shutil import copyfile\nimport hashlib\nimport json\nimport sys\nimport subprocess\nimport logging\nfrom multiprocessing import Pool\nimport pdb\nimport time\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport pydicom as dicom \nimport png\n# pydicom imports needed to handle data errors\nfrom pydicom import config\nfrom pydicom import datadict\nfrom pydicom import values \n\nimport pathlib\nconfigs = {}\n\n\ndef initialize_config_and_execute(config_values):\n global configs\n configs = config_values\n # Applying checks for paths\n \n p1 = pathlib.PurePath(configs['DICOMHome'])\n dicom_home = p1.as_posix() # the folder containing your dicom files\n\n p2 = pathlib.PurePath(configs['OutputDirectory'])\n output_directory = p2.as_posix()\n\n print_images = configs['PrintImages']\n print_only_common_headers = configs['CommonHeadersOnly']\n depth = int(configs['Depth'])\n processes = int(configs['UseProcesses']) # how many processes to use.\n flattened_to_level = configs['FlattenedToLevel']\n email = configs['YourEmail']\n send_email = configs['SendEmail']\n no_splits = int(configs['SplitIntoChunks'])\n is16Bit = configs['is16Bit']\n \n metadata_col_freq_threshold = 0.1\n\n png_destination = output_directory + '/extracted-images/'\n failed = output_directory + '/failed-dicom/'\n maps_directory = output_directory + '/maps/'\n meta_directory = output_directory + '/meta/'\n\n LOG_FILENAME = output_directory + '/ImageExtractor.out'\n pickle_file = output_directory + '/ImageExtractor.pickle'\n\n # record the start time\n t_start = time.time()\n\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)\n\n if not os.path.exists(maps_directory):\n os.makedirs(maps_directory)\n\n if not os.path.exists(meta_directory):\n os.makedirs(meta_directory)\n\n if not os.path.exists(png_destination):\n os.makedirs(png_destination)\n\n if not os.path.exists(failed):\n os.makedirs(failed)\n\n if not os.path.exists(failed + \"/1\"):\n os.makedirs(failed + \"/1\")\n\n if not os.path.exists(failed + \"/2\"):\n os.makedirs(failed + \"/2\")\n\n if not os.path.exists(failed + \"/3\"):\n os.makedirs(failed + \"/3\")\n\n if not os.path.exists(failed + \"/4\"):\n os.makedirs(failed + \"/4\")\n\n logging.info(\"------- Values Initialization DONE -------\")\n final_res = execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,\n processes, flattened_to_level, email, send_email, no_splits, is16Bit, png_destination,\n failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start)\n return final_res\n\n\n# Function for getting tuple for field,val pairs\ndef get_tuples(plan, outlist = None, key = \"\"):\n if len(key)>0:\n key = key + \"_\"\n if not outlist:\n outlist = []\n for aa in plan.dir():\n try:\n hasattr(plan,aa)\n except TypeError as e:\n logging.warning('Type Error encountered')\n if hasattr(plan, aa) and aa!= 'PixelData':\n value = getattr(plan, aa)\n start = len(outlist)\n # if dicom sequence extract tags from each element\n if type(value) is dicom.sequence.Sequence:\n for nn, ss in enumerate(list(value)):\n newkey = \"_\".join([key,(\"%d\"%nn),aa]) if len(key) else \"_\".join([(\"%d\"%nn),aa])\n candidate = get_tuples(ss,outlist=None,key=newkey)\n # if extracted tuples are too big condense to a string\n if len(candidate)>2000:\n outlist.append((newkey,str(candidate)))\n 
else:\n outlist.extend(candidate)\n else:\n if type(value) is dicom.valuerep.DSfloat:\n value = float(value)\n elif type(value) is dicom.valuerep.IS:\n value = str(value)\n elif type(value) is dicom.valuerep.MultiValue:\n value = tuple(value)\n elif type(value) is dicom.uid.UID:\n value = str(value)\n outlist.append((key + aa, value))\n # appends name, value pair for this file. these are later concatenated to the dataframe\n return outlist\n\n\ndef extract_headers(f_list_elem):\n nn,ff = f_list_elem # unpack enumerated list\n plan = dicom.dcmread(ff, force=True) # reads in dicom file\n # checks if this file has an image\n c=True\n try:\n check = plan.pixel_array # throws error if dicom file has no image\n except:\n c = False\n kv = get_tuples(plan) # gets tuple for field,val pairs for this file. function defined above\n # dicom images should not have more than 300\n if len(kv)>500:\n logging.debug(str(len(kv)) + \" dicoms produced by \" + ff)\n kv.append(('file', f_list_elem[1])) # adds my custom field with the original filepath\n kv.append(('has_pix_array',c)) # adds my custom field with if file has image\n if c:\n # adds my custom category field - useful if classifying images before processing\n kv.append(('category','uncategorized'))\n else:\n kv.append(('category','no image')) # adds my custom category field, makes note as imageless\n return dict(kv)\n\n\n# Function to extract pixel array information\n# takes an integer used to index into the global filedata dataframe\n# returns tuple of\n# filemapping: dicom to png paths (as str)\n# fail_path: dicom to failed folder (as tuple)\n# found_err: error code produced when processing\ndef extract_images(filedata, i, png_destination, flattened_to_level, failed, is16Bit):\n ds = dicom.dcmread(filedata.iloc[i].loc['file'], force=True) # read file in\n found_err=None\n filemapping = \"\"\n fail_path = \"\"\n try:\n im = ds.pixel_array # pull image from read dicom\n imName=os.path.split(filedata.iloc[i].loc['file'])[1][:-4] # get file name ex: IM-0107-0022\n\n if flattened_to_level == 'patient':\n ID = filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n folderName = hashlib.sha224(ID.encode('utf-8')).hexdigest()\n # check for existence of patient folder. Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n elif flattened_to_level == 'study':\n ID1 = filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n try:\n ID2 = filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.\n except:\n ID2='ALL-STUDIES'\n folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID2.encode('utf-8')).hexdigest()\n # check for existence of the folder tree patient/study/series. Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n else:\n ID1=filedata.iloc[i].loc['PatientID'] # Unique identifier for the Patient.\n try:\n ID2=filedata.iloc[i].loc['StudyInstanceUID'] # Unique identifier for the Study.\n ID3=filedata.iloc[i].loc['SeriesInstanceUID'] # Unique identifier of the Series.\n except:\n ID2='ALL-STUDIES'\n ID3='ALL-SERIES'\n folderName = hashlib.sha224(ID1.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID2.encode('utf-8')).hexdigest() + \"/\" + \\\n hashlib.sha224(ID3.encode('utf-8')).hexdigest()\n # check for existence of the folder tree patient/study/series. 
Create if it does not exist.\n os.makedirs(png_destination + folderName,exist_ok=True)\n\n\n pngfile = png_destination+folderName + '/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'\n dicom_path = filedata.iloc[i].loc['file']\n image_path = png_destination+folderName+'/' + hashlib.sha224(imName.encode('utf-8')).hexdigest() + '.png'\n if is16Bit == 'True' or is16Bit == 'true':\n # write the PNG file as a 16-bit greyscale \n image_2d = ds.pixel_array.astype(np.double) \n # # Rescaling grey scale between 0-255\n image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 65535.0 \n # # Convert to uint\n shape = ds.pixel_array.shape\n image_2d_scaled = np.uint16(image_2d_scaled) \n with open(pngfile , 'wb') as png_file:\n w = png.Writer(shape[1], shape[0], greyscale=True,bitdepth=16)\n w.write(png_file, image_2d_scaled)\n else: \n shape = ds.pixel_array.shape\n # Convert to float to avoid overflow or underflow losses.\n image_2d = ds.pixel_array.astype(float)\n # Rescaling grey scale between 0-255\n image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 255.0\n # onvert to uint\n image_2d_scaled = np.uint8(image_2d_scaled)\n # Write the PNG file\n with open(pngfile , 'wb') as png_file:\n w = png.Writer(shape[1], shape[0], greyscale=True)\n w.write(png_file, image_2d_scaled)\n filemapping = filedata.iloc[i].loc['file'] + ', ' + pngfile + '\\n'\n except AttributeError as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '1/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except ValueError as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '2/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except BaseException as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '3/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n except Exception as error:\n found_err = error\n logging.error(found_err)\n fail_path = filedata.iloc[i].loc['file'], failed + '4/' + \\\n os.path.split(filedata.iloc[i].loc['file'])[1][:-4]+'.dcm'\n return (filemapping, fail_path, found_err)\n\n\n# Function when pydicom fails to read a value attempt to read as other types.\ndef fix_mismatch_callback(raw_elem, **kwargs):\n try:\n if raw_elem.VR: \n values.convert_value(raw_elem.VR, raw_elem)\n except BaseException as err:\n for vr in kwargs['with_VRs']:\n try:\n values.convert_value(vr, raw_elem)\n except ValueError:\n pass\n else:\n raw_elem = raw_elem._replace(VR=vr)\n return raw_elem\n\n\ndef get_path(depth, dicom_home):\n directory = dicom_home + '/'\n i = 0\n while i < depth:\n directory += \"*/\"\n i += 1\n return directory + \"*.dcm\"\n\n \n# Function used by pydicom.\ndef fix_mismatch(with_VRs=['PN', 'DS', 'IS']):\n \"\"\"A callback function to check that RawDataElements are translatable\n with their provided VRs. If not, re-attempt translation using\n some other translators.\n Parameters\n ----------\n with_VRs : list, [['PN', 'DS', 'IS']]\n A list of VR strings to attempt if the raw data element value cannot\n be translated with the raw data element's VR.\n Returns\n -------\n No return value. 
The callback function will return either\n the original RawDataElement instance, or one with a fixed VR.\n \"\"\"\n dicom.config.data_element_callback = fix_mismatch_callback\n config.data_element_callback_kwargs = {\n 'with_VRs': with_VRs,\n } \n\n\ndef execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,\n processes, flattened_to_level, email, send_email, no_splits, is16Bit, png_destination,\n failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start):\n err = None\n fix_mismatch()\n if processes == 0.5: # use half the cores to avoid high ram usage\n core_count = int(os.cpu_count()/2)\n elif processes == 0: # use all the cores\n core_count = int(os.cpu_count())\n elif processes < os.cpu_count(): # use the specified number of cores to avoid high ram usage\n core_count = processes\n else:\n core_count = int(os.cpu_count())\n # get set up to create dataframe\n dirs = os.listdir(dicom_home)\n # gets all dicom files. if editing this code, get filelist into the format of a list of strings,\n # with each string as the file path to a different dicom file.\n file_path = get_path(depth, dicom_home)\n\n if os.path.isfile(pickle_file):\n f=open(pickle_file,'rb')\n filelist=pickle.load(f)\n else:\n filelist=glob.glob(file_path, recursive=True) # search the folders at the depth we request and finds all dicoms\n pickle.dump(filelist,open(pickle_file,'wb'))\n file_chunks = np.array_split(filelist,no_splits)\n logging.info('Number of dicom files: ' + str(len(filelist)))\n\n try:\n ff = filelist[0] # load first file as a template to look at all\n except IndexError:\n logging.error(\"There is no file present in the given folder in \" + file_path)\n sys.exit(1)\n\n plan = dicom.dcmread(ff, force=True)\n logging.debug('Loaded the first file successfully')\n\n keys = [(aa) for aa in plan.dir() if (hasattr(plan, aa) and aa != 'PixelData')]\n # checks for images in fields and prints where they are\n for field in plan.dir():\n if (hasattr(plan, field) and field!='PixelData'):\n entry = getattr(plan, field)\n if type(entry) is bytes:\n logging.debug(field)\n logging.debug(str(entry))\n\n for i,chunk in enumerate(file_chunks):\n csv_destination = \"{}/meta/metadata_{}.csv\".format(output_directory,i)\n mappings = \"{}/maps/mapping_{}.csv\".format(output_directory,i)\n fm = open(mappings, \"w+\")\n filemapping = 'Original DICOM file location, PNG location \\n'\n fm.write(filemapping)\n\n # add a check to see if the metadata has already been extracted\n # step through whole file list, read in file, append fields to future dataframe of all files\n\n headerlist = []\n # start up a multi processing pool\n # for every item in filelist send data to a subprocess and run extract_headers func\n # output is then added to headerlist as they are completed (no ordering is done)\n with Pool(core_count) as p:\n res= p.imap_unordered(extract_headers, enumerate(chunk))\n for i,e in enumerate(res):\n headerlist.append(e)\n data = pd.DataFrame(headerlist)\n logging.info('Chunk ' + str(i) + ' Number of fields per file : ' + str(len(data.columns)))\n # find common fields\n # make dataframe containing all fields and all files minus those removed in previous block\n # export csv file of final dataframe\n export_csv = data.to_csv(csv_destination, index = None, header=True)\n fields=data.keys()\n count = 0 # potential painpoint\n # writting of log handled by main process\n if print_images:\n logging.info(\"Start processing Images\")\n filedata = data\n total = 
len(chunk)\n stamp = time.time()\n for i in range(len(filedata)):\n (fmap,fail_path,err) = extract_images(filedata, i, png_destination, flattened_to_level, failed, is16Bit)\n if err:\n count +=1\n copyfile(fail_path[0],fail_path[1])\n err_msg = str(count) + ' out of ' + str(len(chunk)) + ' dicom images have failed extraction'\n logging.error(err_msg)\n else:\n fm.write(fmap)\n fm.close()\n logging.info('Chunk run time: %s %s', time.time() - t_start, ' seconds!')\n\n logging.info('Generating final metadata file')\n\n col_names = dict()\n all_headers = dict()\n total_length = 0\n\n metas = glob.glob( \"{}*.csv\".format(meta_directory))\n # for each meta file identify the columns that are not na's for at least 10% (metadata_col_freq_threshold) of data\n for meta in metas:\n m = pd.read_csv(meta,dtype='str')\n d_len = m.shape[0]\n total_length += d_len\n\n for e in m.columns:\n col_pop = d_len - np.sum(m[e].isna()) # number of populated rows for this column in this metadata file\n\n if e in col_names:\n col_names[e] += col_pop\n else:\n col_names[e] = col_pop\n \n # all_headers keeps track of number of appearances of each header. We later use this count to ensure that\n # the headers we use are present in all metadata files.\n if e in all_headers:\n all_headers[e] += 1\n else:\n all_headers[e] = 1\n\n loadable_names = list()\n for k in col_names.keys():\n if k in all_headers and all_headers[k] >= no_splits: # no_splits == number of batches used \n if col_names[k] >= metadata_col_freq_threshold*total_length:\n loadable_names.append(k) # use header only if it's present in every metadata file\n \n # load every metadata file using only valid columns\n meta_list = list()\n for meta in metas:\n m = pd.read_csv(meta,dtype='str',usecols=loadable_names)\n meta_list.append(m)\n merged_meta = pd.concat(meta_list,ignore_index=True)\n merged_meta.to_csv('{}/metadata.csv'.format(output_directory),index=False)\n # getting a single mapping file\n logging.info('Generatign final mapping file')\n mappings = glob.glob(\"{}/maps/*.csv\".format(output_directory))\n map_list = list()\n for mapping in mappings:\n map_list.append(pd.read_csv(mapping,dtype='str'))\n merged_maps = pd.concat(map_list,ignore_index=True)\n if print_only_common_headers == 'True' or print_only_common_headers == 'true':\n mask_common_fields = merged_maps.isnull().mean() < 0.1\n common_fields = set(np.asarray(merged_maps.columns)[mask_common_fields])\n merged_maps = merged_maps[common_fields]\n merged_maps.to_csv('{}/mapping.csv'.format(output_directory),index=False)\n\n if send_email == 'True' or send_email == 'true':\n subprocess.call('echo \"Niffler has successfully completed the png conversion\" | mail -s \"The image conversion'\n ' has been complete\" {0}'.format(email), shell=True)\n # Record the total run-time\n logging.info('Total run time: %s %s', time.time() - t_start, ' seconds!')\n logging.shutdown() # Closing logging file after extraction is done !!\n logs = []\n logs.append(err)\n logs.append(\"The PNG conversion is SUCCESSFUL\")\n return logs\n\n\nif __name__ == \"__main__\":\n with open('config.json', 'r') as f:\n niffler = json.load(f)\n\n initialize_config_and_execute(niffler)\n" ]
[ [ "numpy.uint8", "numpy.maximum", "numpy.asarray", "numpy.uint16", "pandas.DataFrame", "pandas.concat", "pandas.read_csv", "numpy.array_split" ] ]
jasonfan1997/umd_icecube_analysis_tutorial
[ "50bf3af27f81d719953ac225f199e733b5c0bddf" ]
[ "mla/mla/sensitivtiy.py" ]
[ "'''Core functionality'''\r\n\r\nfrom __future__ import print_function, division\r\nimport os, sys, glob, numpy as np, matplotlib, scipy, time\r\nfrom scipy import stats, interpolate, optimize\r\nfrom math import pi\r\nimport numpy.lib.recfunctions as rf\r\nfrom mla.spectral import *\r\nfrom mla.tools import *\r\nfrom mla.timing import *\r\nfrom mla.core import *\r\nfrom mla.injection import *\r\nimport scipy.stats\r\nfrom copy import deepcopy\r\nfrom matplotlib import pyplot as plt, colors\r\n\r\n\r\nclass PS_sensitivity():\r\n def __init__(self):\r\n pass\r\n \r\n def background_building(self, data, sim, bkg_bins=np.linspace(-1.0, 1.0, 501), bkg_2dbins=[np.linspace(-1,1,100),np.linspace(1,8,100)],gamma_points = np.arange(-4, -1, 0.25),save_file = None):\r\n r''' Building the background distribution\r\n args:\r\n data:The Background\r\n sim: Monte Carlo simulation\r\n spectrum: Spectrum , could be a BaseSpectrum object or a string name PowerLaw\r\n bkg_2dbins: The sindec and logE binning for energy S/B histogram.\r\n gamma_points: The set of gamma for PowerLaw energy weighting.\r\n save_file: location to save the background file.Default is not saving.\r\n '''\r\n self.energybins = bkg_2dbins\r\n if save_file is not None:\r\n bkg_file = save_file + \"bkg_dec.pkl\"\r\n sob_file = save_file + \"bkd_SOB.npy\"\r\n self.bkg_spline = build_bkg_spline(data , bins = bkg_bins , file_name = bkg_file)\r\n self.ratio,self.gamma_points = build_energy_2dhistogram(data, sim ,bkg_2dbins ,gamma_points,file_name = sob_file)\r\n else:\r\n self.bkg_spline = build_bkg_spline(data , bins = bkg_bins )\r\n self.ratio,self.gamma_points = build_energy_2dhistogram(data, sim ,bkg_2dbins ,gamma_points)\r\n return\r\n \r\n def load_background(self, dir_name, bkg_bins=np.linspace(-1.0, 1.0, 501), bkg_2dbins=[np.linspace(-1,1,100),np.linspace(1,8,100)],gamma_points = np.arange(-4, -1, 0.25)):\r\n r''' Loading background distribution\r\n args:\r\n dir_name:Location to the file\r\n spectrum: Spectrum , could be a BaseSpectrum object or a string name PowerLaw\r\n bkg_2dbins: The sindec and logE binning for energy S/B histogram.\r\n gamma_points: The set of gamma for PowerLaw energy weighting.\r\n '''\r\n self.energybins = bkg_2dbins\r\n bkg_file = dir_name + \"bkg_dec.pkl\"\r\n sob_file = dir_name + \"bkd_SOB.npy\"\r\n with open(bkg_file, 'rb') as f:\r\n self.bkg_spline = pickle.load(f)\r\n self.ratio = np.load(sob_file)\r\n self.gamma_points = gamma_points\r\n return\r\n \r\n def set_point_source(self, ra , dec , data , sim , spectrum , signal_time_profile = None , background_time_profile = (0,1)):\r\n r'''Set the location of the source and load the information of the model.\r\n ra: RA of the source in rad\r\n dec: Declination of the source in rad\r\n data:The data\r\n sim: Monte Carlo simulation\r\n spectrum: Spectrum , could be a BaseSpectrum object or a string name PowerLaw\r\n signal_time_profile: generic_profile object. This is the signal time profile.Default is the same as background_time_profile.\r\n background_time_profile: generic_profile object or the list of the start time and end time. 
This is the background time profile.Default is a (0,1) tuple which will create a uniform_profile from 0 to 1.\r\n '''\r\n self.point_source=LLH_point_source(ra , dec , data , sim , spectrum , signal_time_profile = signal_time_profile , background_time_profile = background_time_profile,gamma_points=self.gamma_points,bkg_dec_spline=self.bkg_spline,sob_maps = self.ratio)\r\n self.background_time_profile = deepcopy(self.point_source.background_time_profile)\r\n self.signal_time_profile = deepcopy(self.point_source.signal_time_profile)\r\n return\r\n \r\n def set_backround(self, background ,grl ,background_window = 14):\r\n r'''Setting the background information which will later be used when drawing data as background\r\n args:\r\n background:Background data\r\n grl:The good run list\r\n background_window: The time window(days) that will be used to estimated the background rate and drawn sample from.Default is 14 days\r\n '''\r\n start_time = self.background_time_profile.get_range()[0]\r\n fully_contained = (grl['start'] >= start_time-background_window) &\\\r\n (grl['stop'] < start_time)\r\n start_contained = (grl['start'] < start_time-background_window) &\\\r\n (grl['stop'] > start_time-background_window)\r\n background_runs = (fully_contained | start_contained)\r\n if not np.any(background_runs):\r\n print(\"ERROR: No runs found in GRL for calculation of \"\r\n \"background rates!\")\r\n raise RuntimeError\r\n background_grl = grl[background_runs]\r\n \r\n # Get the number of events we see from these runs and scale \r\n # it to the number we expect for our search livetime.\r\n n_background = background_grl['events'].sum()\r\n n_background /= background_grl['livetime'].sum()\r\n n_background *= self.background_time_profile.effective_exposure()\r\n self.n_background = n_background\r\n self.background = background\r\n return\r\n \r\n def set_injection( self, sim , gamma = -2, signal_time_profile = None , background_time_profile = (0,1), sampling_width = np.radians(1) ,ra = None,dec = None):\r\n r'''Set the details of the injection.\r\n sim: Simulation data\r\n gamma: Spectral index of the injection spectrum\r\n signal_time_profile: generic_profile object. This is the signal time profile.Default is the same as background_time_profile.\r\n background_time_profile: generic_profile object or the list of the start time and end time. 
This is the background time profile.Default is a (0,1) tuple which will create a uniform_profile from 0 to 1.\r\n '''\r\n spectrum = PowerLaw( 100e3, 1, gamma)\r\n self.PS_injector = PSinjector(spectrum, sim , signal_time_profile = None , background_time_profile = background_time_profile)\r\n if ra is None:\r\n self.PS_injector.set_source_location(self.point_source.ra,self.point_source.dec,sampling_width = sampling_width)\r\n else:\r\n self.PS_injector.set_source_location(ra,dec,sampling_width = sampling_width)\r\n return\r\n \r\n def draw_data(self):\r\n r'''Draw data sample\r\n return:\r\n background: background sample\r\n '''\r\n n_background_observed = np.random.poisson(self.n_background)\r\n background = np.random.choice(self.background, n_background_observed).copy()\r\n background['time'] = self.background_time_profile.random(len(background))\r\n return background\r\n \r\n def draw_signal(self):\r\n r'''Draw signal sample\r\n return:\r\n signal: signal sample\r\n '''\r\n return self.PS_injector.sample_from_spectrum()\r\n \r\n \r\n \r\n def build_background_TS(self,n_trials = 1000):\r\n r'''build background TS distribution\r\n args:\r\n n_trials: Number of trials\r\n return:\r\n TS: The TS array\r\n '''\r\n TS = []\r\n for i in range(n_trials):\r\n self.point_source.update_data(self.draw_data())\r\n TS.append(self.point_source.eval_llh_fit_ns()[1])\r\n return np.array(TS)\r\n \r\n def build_signal_TS(self, signal_trials = 200 ,result = False ,result_file = None):\r\n r'''build signal TS distribution\r\n args:\r\n signal_trials: Number of trials\r\n result: Whether storing the full result in self.result.Default is False.\r\n result_file:Whether storing the full result in file.Default is False.\r\n \r\n return:\r\n TS: The TS array\r\n '''\r\n TS = []\r\n ts_result = []\r\n for i in range(signal_trials):\r\n data = self.draw_data()\r\n signal = self.draw_signal()\r\n signal = rf.drop_fields(signal, [n for n in signal.dtype.names \\\r\n if not n in data.dtype.names])\r\n self.point_source.update_data(np.concatenate([data,signal]))\r\n TS.append(self.point_source.eval_llh_fit_ns()[1])\r\n ts_result.append(self.point_source.get_fit_result)\r\n if result:\r\n np.save(result_file, np.array(ts_result))\r\n return np.array(TS)\r\n \r\n def calculate_ratio_passthreshold(self,bkg_trials = 1000, signal_trials = 200 ,result = False ,result_file = None):\r\n r'''Calculate the ratio of signal trials passing the threshold\r\n args:\r\n bkg_trials : Number of background trials\r\n signal_trials: Number of signal trials\r\n result: Whether storing the full result in self.result.Default is False.\r\n result_file:Whether storing the full result in file.Default is False.\r\n \r\n return:\r\n result:The ratio of passing(both for three sigma and median of the background\r\n '''\r\n signal_ts = self.build_signal_TS(signal_trials ,result = result ,result_file = result_file)\r\n result = [(signal_ts > self.bkg_three_sigma ).sum()/float(len(signal_ts)), (signal_ts > self.bkg_median).sum()/float(len(signal_ts))]\r\n return result\r\n \r\n def calculate_sensitivity(self, bkg_trials = 1000, signal_trials = 200, gamma = -2, list_N = [1e-17] ,N_factor = 2 , make_plot = None ,Threshold_list=[90] , Threshold_potential = [50],result_save = False ,result_file = None):\r\n r'''Calculate the sensitivity plus the discovery potential\r\n args:\r\n bkg_trials : Number of background trials\r\n signal_trials: Number of signal trials\r\n gamma: Spectral index of the injection signal\r\n list_N:The list of flux norm to test and 
build the spline\r\n N_factor: Factor for Flux increments .If the maximum in list_N still wasn't enough to pass the threshold, the program will enter a while loop with N_factor*N tested each times until the N passed the threshold.\r\n make_plot: The file name of the plot saved. Default is not saving\r\n Threshold_list: The list of threshold of signal TS passing Median of the background TS. \r\n Threshold_potential: The list of threshold of signal TS passing 3 sigma of the background TS. \r\n result: Whether storing the full result in self.result.Default is False.\r\n result_file:Whether storing the full result in file.Default is False.\r\n\r\n '''\r\n self.Threshold_list = Threshold_list\r\n self.Threshold_potential = Threshold_potential\r\n max_threshold = np.array(Threshold_list).max()\r\n max_potential = np.array(Threshold_potential).max()\r\n list_N = np.array(deepcopy(list_N))\r\n result = []\r\n self.ts_bkg = self.build_background_TS(bkg_trials)\r\n self.bkg_median = np.percentile(self.ts_bkg , 50)\r\n self.bkg_three_sigma = np.percentile(self.ts_bkg , 99.7)\r\n for N in list_N:\r\n print(\"Now testing : \"+ str(N))\r\n spectrum = PowerLaw( 100e3, N, gamma)\r\n self.PS_injector.update_spectrum(spectrum)\r\n tempresult = self.calculate_ratio_passthreshold(bkg_trials = 1000, signal_trials = 200, result = result_save ,result_file = result_file)\r\n print(tempresult)\r\n result.append(tempresult)\r\n if tempresult[0] < max_potential*0.01 or tempresult[1] < max_threshold*0.01:\r\n reach_max = False\r\n N = N * N_factor\r\n list_N = np.append(list_N,N)\r\n else:\r\n reach_max = True\r\n while not reach_max:\r\n print(\"Now testing : \"+ str(N))\r\n spectrum = PowerLaw( 100e3, N, gamma)\r\n self.PS_injector.update_spectrum(spectrum)\r\n tempresult = self.calculate_ratio_passthreshold(bkg_trials = 1000, signal_trials = 200, result = result_save ,result_file = result_file)\r\n print(tempresult)\r\n result.append(tempresult)\r\n if tempresult[0] < max_potential*0.01 or tempresult[1] < max_threshold*0.01:\r\n N = N * N_factor\r\n list_N = np.append(list_N,N)\r\n else:\r\n reach_max = True\r\n result = np.array(result)\r\n self.result = result\r\n self.list_N = list_N\r\n self.spline_sigma = interpolate.UnivariateSpline(list_N,result[:,0] , ext = 3)\r\n self.spline_sen = interpolate.UnivariateSpline( list_N,result[:,1] , ext = 3)\r\n Threshold_result = []\r\n Threshold_potential_result = []\r\n for i in Threshold_list:\r\n tempspline = interpolate.UnivariateSpline(list_N,result[:,1]-i*0.01 , ext = 3)\r\n Threshold_result.append(tempspline.roots()[0])\r\n print(\"Threshold: \" + str(i) + \", N : \" + str(self.spline_sen(i*0.01)))\r\n for i in Threshold_potential:\r\n tempspline = interpolate.UnivariateSpline(list_N,result[:,0]-i*0.01 , ext = 3)\r\n Threshold_potential_result.append(tempspline.roots()[0])\r\n print(\"Threshold_potential: \" + str(i) + \", N : \" + str(self.spline_sigma(i*0.01))) \r\n self.Threshold_result = Threshold_result\r\n self.Threshold_potential_result = Threshold_potential_result\r\n if make_plot != None :\r\n self.make_plot(make_plot)\r\n return\r\n \r\n def make_plot(self,file_name):\r\n r'''save plot to file_name\r\n '''\r\n fig, ax = plt.subplots(figsize = (12,12))\r\n ax.scatter(self.list_N,self.result[:,1],label = 'sensitiviy point',color='r')\r\n ax.scatter(self.list_N,self.result[:,0],label = 'potential point',color='b')\r\n ax.set_xlim(self.list_N[0],self.list_N[-1])\r\n 
ax.plot(np.linspace(self.list_N[0],self.list_N[-1],1000),self.spline_sen(np.linspace(self.list_N[0],self.list_N[-1],1000)),label = 'sensitiviy spline',color='r')\r\n ax.plot(np.linspace(self.list_N[0],self.list_N[-1],1000),self.spline_sigma(np.linspace(self.list_N[0],self.list_N[-1],1000)),label = 'potential spline',color='b')\r\n for i in range(len(self.Threshold_result)):\r\n ax.axvline(self.Threshold_result[i],label = 'sensitiviy '+str(self.Threshold_list[i]),color='r')\r\n for i in range(len(self.Threshold_potential_result)):\r\n ax.axvline(self.Threshold_potential_result[i],label = 'potential '+str(self.Threshold_potential[i]),color='b')\r\n ax.set_title(\"Flux norm vs passing ratio\",fontsize=14)\r\n ax.set_xlabel(r\"Flux Norm($GeV cm^{-2} s^{-1}$)\",fontsize=14)\r\n ax.set_ylabel(r\"Passing ratio\",fontsize=14)\r\n ax.legend(fontsize=14)\r\n fig.savefig(file_name)\r\n plt.close()" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.random.choice", "scipy.interpolate.UnivariateSpline", "numpy.percentile", "numpy.random.poisson", "numpy.load", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "numpy.radians", "numpy.any", "numpy.arange", "numpy.append", "numpy.linspace", "numpy.lib.recfunctions.drop_fields" ] ]
yetyetanotherusername/vaex
[ "71ff313486f9ee3a142d9fb4e80c7bdc0e1270c5", "71ff313486f9ee3a142d9fb4e80c7bdc0e1270c5", "71ff313486f9ee3a142d9fb4e80c7bdc0e1270c5" ]
[ "tests/join_test.py", "tests/evaluate_test.py", "packages/vaex-core/vaex/legacy.py" ]
[ "import pytest\nimport vaex\nimport numpy as np\nimport numpy.ma\n\ndf_a = vaex.from_arrays(a=np.array(['A', 'B', 'C']),\n x=np.array([0., 1., 2.]),\n y=np.ma.array([0., 9., 2.], mask=[False, True, False]),\n m=np.ma.array([1, 2, 3], mask=[False, True, False])\n )\n\ndf_b = vaex.from_arrays(b=np.array(['A', 'B', 'D']),\n x=np.array([2., 1., 0.]),\n y=np.ma.array([9., 1., 2.], mask=[True, False, False]),\n m=np.ma.array([3, 1, 2], mask=[True, False, False])\n )\n\ndf_dup = vaex.from_arrays(b=np.array(['A', 'B', 'A']),\n x=np.array([2., 1., 2.]),\n y=np.ma.array([9., 1., 9.], mask=[True, False, False]),\n m=np.ma.array([3, 1, 2], mask=[True, True, False])\n )\n\ndf_c = vaex.from_arrays(c=np.array(['B', 'C']),\n z1=np.array([-1., -2.]),\n z2=np.array([True, False]),\n )\n\ndf_d = vaex.from_arrays(a=np.array(['B', 'C', 'D']),\n x1=np.array(['dog', 'cat', 'mouse']),\n x2=np.array([3.1, 25, np.nan]),\n )\n\ndf_e = vaex.from_arrays(a=np.array(['X', 'Y', 'Z']),\n x1=np.array(['dog', 'cat', 'mouse']),\n x2=np.array([3.1, 25, np.nan]),\n )\n\n\ndef test_no_on():\n # just adds the columns\n df = df_a.join(df_b, rsuffix='_r')\n assert df.columns['b'] is df_b.columns['b']\n\n\ndef test_join_masked():\n df = df_a.join(other=df_b, left_on='m', right_on='m', rsuffix='_r')\n assert df.evaluate('m').tolist() == [1, None, 3]\n assert df.evaluate('m_r').tolist() == [1, None, None]\n\n\ndef test_join_nomatch():\n df = df_a.join(df_e, on='a', rprefix='r_')\n assert df.x2.tolist() == [None, None, None]\n\n\ndef test_left_a_b():\n df = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')\n assert df.evaluate('a').tolist() == ['A', 'B', 'C']\n assert df.evaluate('b').tolist() == ['A', 'B', None]\n assert df.evaluate('x').tolist() == [0, 1, 2]\n assert df.evaluate('x_r').tolist() == [2, 1, None]\n assert df.evaluate('y').tolist() == [0, None, 2]\n assert df.evaluate('y_r').tolist() == [None, 1, None]\n\ndef test_join_indexed():\n df = df_a.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')\n df_X = df_a.join(df, left_on='a', right_on='b', rsuffix='_r')\n assert df_X.evaluate('b').tolist() == ['A', 'B', None]\n\n\ndef test_left_a_b_filtered():\n df_af = df_a[df_a.x > 0]\n df = df_af.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')\n assert df.evaluate('a').tolist() == ['B', 'C']\n assert df.evaluate('b').tolist() == ['B', None]\n assert df.evaluate('x').tolist() == [1, 2]\n assert df.evaluate('x_r').tolist() == [1, None]\n assert df.evaluate('y').tolist() == [None, 2]\n assert df.evaluate('y_r').tolist() == [1, None]\n\n # actually, even though the filter is applied, all rows will be matched\n # since the filter can change\n df.set_selection(None, vaex.dataset.FILTER_SELECTION_NAME)\n assert df.evaluate('a').tolist() == ['A', 'B', 'C']\n assert df.evaluate('b').tolist() == ['A', 'B', None]\n assert df.evaluate('x').tolist() == [0, 1, 2]\n assert df.evaluate('x_r').tolist() == [2, 1, None]\n assert df.evaluate('y').tolist() == [0, None, 2]\n assert df.evaluate('y_r').tolist() == [None, 1, None]\n\n # if we extract, that shouldn't be the case\n df_af = df_a[df_a.x > 0].extract()\n df = df_af.join(other=df_b, left_on='a', right_on='b', rsuffix='_r')\n df.set_selection(None, vaex.dataset.FILTER_SELECTION_NAME)\n assert df.evaluate('a').tolist() == ['B', 'C']\n assert df.evaluate('b').tolist() == ['B', None]\n assert df.evaluate('x').tolist() == [1, 2]\n assert df.evaluate('x_r').tolist() == [1, None]\n assert df.evaluate('y').tolist() == [None, 2]\n assert df.evaluate('y_r').tolist() == [1, 
None]\n\ndef test_inner_a_b_filtered():\n df_a_filtered = df_a[df_a.x > 0]\n df = df_a_filtered.join(other=df_b, left_on='a', right_on='b', rsuffix='_r', how='inner')\n assert df.evaluate('a').tolist() == ['B']\n assert df.evaluate('b').tolist() == ['B']\n assert df.evaluate('x').tolist() == [1]\n assert df.evaluate('x_r').tolist() == [1]\n assert df.evaluate('y').tolist() == [None]\n assert df.evaluate('y_r').tolist() == [1]\n\ndef test_right_x_x():\n df = df_a.join(other=df_b, on='x', rsuffix='_r', how='right')\n assert df.evaluate('a').tolist() == ['C', 'B', 'A']\n assert df.evaluate('b').tolist() == ['A', 'B', 'D']\n assert df.evaluate('x').tolist() == [2, 1, 0]\n assert df.evaluate('x_r').tolist() == [2, 1, 0]\n assert df.evaluate('y').tolist() == [2, None, 0]\n assert df.evaluate('y_r').tolist() == [None, 1, 2]\n assert 'y_r' not in df_b\n\n\ndef test_left_dup():\n df = df_a.join(df_dup, left_on='a', right_on='b', rsuffix='_r', allow_duplication=True)\n assert len(df) == 4\n # df = df_a.join(df_dup, on='x', rsuffix='_r')\n # df = df_a.join(df_dup, on='m', rsuffix='_r')\n\n\ndef test_left_a_c():\n df = df_a.join(df_c, left_on='a', right_on='c', how='left')\n assert df.a.tolist() == ['A', 'B', 'C']\n assert df.x.tolist() == [0, 1, 2]\n assert df.y.tolist() == [0., None, 2.]\n assert df.m.tolist() == [1, None, 3]\n assert df.c.tolist() == [None, 'B', 'C']\n assert df.z1.tolist() == [None, -1., -2.]\n assert df.z2.tolist() == [None, True, False]\n\n\ndef test_join_a_a_suffix_check():\n df = df_a.join(df_a, on='a', lsuffix='_left', rsuffix='_right')\n assert set(df.column_names) == {'a_left', 'x_left', 'y_left', 'm_left', 'a_right', 'x_right', 'y_right', 'm_right'}\n\n\ndef test_join_a_a_prefix_check():\n df = df_a.join(df_a, on='a', lprefix='left_', rprefix='right_')\n assert set(df.column_names) == {'left_a', 'left_x', 'left_y', 'left_m', 'right_a', 'right_x', 'right_y', 'right_m'}\n\n\ndef test_inner_a_d():\n df = df_a.join(df_d, on='a', right_on='a', how='inner', rsuffix='_r')\n assert df.a.tolist() == ['B', 'C']\n assert df.x.tolist() == [1., 2.]\n assert df.y.tolist() == [None, 2.]\n assert df.m.tolist() == [None, 3.]\n assert df.x1.tolist() == ['dog', 'cat']\n assert df.x2.tolist() == [3.1, 25.]\n\n\[email protected](reason='full join not supported yet')\ndef test_full_a_d():\n df = df_a.join(df_d, on='a', right_on='a', how='full')\n assert df.a.tolist() == ['A', 'B', 'C', 'D']\n assert df.x.tolist() == [0., 1., 2., None]\n assert df.y.tolist() == [0., None, 2., None]\n assert df.m.tolist() == [1, None, 3, None]\n assert df.x1.tolist() == [None, 'dog', 'cat', 'mouse']\n assert df.x2.tolist() == [None, 3.1, 25., np.nan]\n np.testing.assert_array_equal(np.array(df_d.x2.values), np.array([3.1, 25., np.nan]))\n\n\ndef test_left_virtual_filter():\n df = df_a.join(df_d, on='a', how='left', rsuffix='_b')\n df['r'] = df.x + df.x2\n df = df[df.r > 10]\n assert set(df[0]) == {'C', 2.0, 2.0, 3, 'C', 'cat', 25.0, 27.0}\n\n\ndef test_left_on_virtual_col():\n mapper = {0: 'A', 1: 'B', 2: 'C'}\n df_a['aa'] = df_a.x.map(mapper=mapper)\n df = df_a.join(df_d, left_on='aa', right_on='a', rsuffix='_right')\n assert df.a.tolist() == ['A', 'B', 'C']\n assert df.aa.tolist() == ['A', 'B', 'C']\n assert df.x.tolist() == [0, 1, 2]\n assert df.y.tolist() == [0., None, 2.]\n assert df.m.tolist() == [1, None, 3]\n assert df.x1.tolist() == [None, 'dog', 'cat']\n assert df.x2.tolist() == [None, 3.1, 25.]\n assert df.a_right.tolist() == [None, 'B', 'C']\n\n\ndef test_join_filtered_inner():\n df_a_filtered = 
df_a[df_a.y > 0]\n df_joined = df_a_filtered.join(other=df_b, on='x', how='inner', rsuffix='_', allow_duplication=True)\n assert len(df_joined) == len(df_a_filtered)\n\n x = np.arange(20)\n df = vaex.from_arrays(x=x, y=x**2)\n df = df[df.x > 5]\n dfj = df.join(df, on='x', rsuffix='right_', how='inner')\n repr(dfj) # trigger issue with selection cache\n\n\ndef test_join_duplicate_column():\n df_left = vaex.from_arrays(index=[1, 2, 3], x=[10, 20, 30])\n df_right = vaex.from_arrays(index=[1, 2, 3], y=[0.1, 0.2, 0.3])\n\n df = df_left.join(df_right, on='index')\n assert df.column_count() == 3\n assert set(df.column_names) == {'index', 'x', 'y'}\n assert df['index'] == [1, 2, 3]\n assert df.x.tolist() == [10, 20, 30]\n assert df.y.tolist() == [0.1, 0.2, 0.3]\n", "import vaex\nimport numpy as np\n\ndef test_evaluate_function_filtered_df():\n # Custom function to be applied to a filtered DataFrame\n def custom_func(x):\n assert 4 not in x; return x**2\n\n df = vaex.from_arrays(x=np.arange(10))\n df_filtered = df[df.x!=4]\n df_filtered.add_function('custom_function', custom_func)\n df_filtered['y'] = df_filtered.func.custom_function(df_filtered.x)\n assert df_filtered.y.tolist() == [0, 1, 4, 9, 25, 36, 49, 64, 81]\n\n # sliced exactly at the start of where we are going to filter\n # this used to trigger a bug in df.dtype, which would evaluate the first row\n df_sliced = df[4:]\n df_filtered = df_sliced[df_sliced.x!=4]\n df_filtered.add_function('custom_function', custom_func)\n df_filtered['y'] = df_filtered.func.custom_function(df_filtered.x)\n assert df_filtered.y.tolist() == [25, 36, 49, 64, 81]\n", "# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function\nimport numpy as np\nimport vaex\nfrom .tasks import Task, TaskMapReduce\nfrom .utils import _parse_f\nimport six\n\n\ndef _asfloat(a):\n if a.dtype.type == np.float64 and a.strides[0] == 8:\n return a\n else:\n return a.astype(np.float64, copy=False)\n\nclass TaskMapReduceLegacy(TaskMapReduce):\n def __init__(self, *args, **kwargs):\n kwargs = kwargs.copy()\n kwargs['ignore_filter'] = True\n TaskMapReduce.__init__(self, *args, **kwargs)\n\nclass TaskHistogram(Task):\n def __init__(self, df, subspace, expressions, size, limits, masked=False, weight=None):\n self.size = size\n self.limits = limits\n Task.__init__(self, df, expressions, name=\"histogram\")\n self.subspace = subspace\n self.dtype = np.float64\n self.masked = masked\n self.weight = weight\n # self.grids = vaex.grids.Grids(self.df, self.df.executor.thread_pool, *expressions)\n # self.grids.ranges = limits\n # self.grids.grids[\"counts\"] = vaex.grids.Grid(self.grids, size, self.dimension, None)\n shape1 = (self.size,) * self.dimension\n try:\n self.size[0]\n shape1 = tuple(self.size)\n except:\n pass\n shape = (self.subspace.executor.thread_pool.nthreads,) + shape1\n self.data = np.zeros(shape, dtype=self.dtype)\n self.ranges_flat = []\n self.minima = []\n self.maxima = []\n for limit in self.limits:\n self.ranges_flat.extend(limit)\n vmin, vmax = limit\n self.minima.append(vmin)\n self.maxima.append(vmax)\n if self.weight is not None:\n self.expressions_all.append(weight)\n # print self.ranges_flat\n\n def __repr__(self):\n name = self.__class__.__module__ + \".\" + self.__class__.__name__\n return \"<%s(df=%r, expressions=%r, size=%r, limits=%r)> instance at 0x%x\" % (name, self.df, self.expressions, self.size, self.limits, id(self))\n\n def map(self, thread_index, i1, i2, filter_mask, *blocks):\n class Info(object):\n pass\n info = Info()\n info.i1 = i1\n 
info.i2 = i2\n info.first = i1 == 0\n info.last = i2 == self.df.length_unfiltered()\n info.size = i2 - i1\n # print \"bin\", i1, i2, info.last\n # self.grids[\"counts\"].bin_block(info, *blocks)\n # mask = self.df.mask\n data = self.data[thread_index]\n\n blocks = [_asfloat(block) for block in blocks]\n\n if self.masked or self.df.filtered:\n mask = self.df.evaluate_selection_mask(\"default\" if self.masked else None, i1=i1, i2=i2)\n blocks = [block[mask] for block in blocks]\n\n subblock_weight = None\n if len(blocks) == len(self.expressions) + 1:\n subblock_weight = blocks[-1]\n blocks = list(blocks[:-1])\n # print subblocks[0]\n # print subblocks[1]\n\n if self.dimension == 1:\n vaex.vaexfast.histogram1d(blocks[0], subblock_weight, data, *self.ranges_flat)\n elif self.dimension == 2:\n # if subblock_weight is None:\n # #print \"speedup?\"\n # histogram_numba(blocks[0], blocks[1], subblock_weight, data, *self.ranges_flat)\n # else:\n vaex.vaexfast.histogram2d(blocks[0], blocks[1], subblock_weight, data, *self.ranges_flat)\n # vaex.vaexfast.statisticNd([blocks[0], blocks[1]], subblock_weight, data, self.minima, self.maxima, 0)\n elif self.dimension == 3:\n vaex.vaexfast.histogram3d(blocks[0], blocks[1], blocks[2], subblock_weight, data, *self.ranges_flat)\n else:\n blocks = list(blocks) # histogramNd wants blocks to be a list\n vaex.vaexfast.histogramNd(blocks, subblock_weight, data, self.minima, self.maxima)\n\n return i1\n # return map(self._map, blocks)#[self.map(block) for block in blocks]\n\n def reduce(self, results):\n for i in range(1, self.subspace.executor.thread_pool.nthreads):\n self.data[0] += self.data[i]\n return self.data[0]\n # return self.data\n\n\nclass SubspaceGridded(object):\n def __init__(self, subspace_bounded, grid, vx=None, vy=None, vcounts=None):\n self.subspace_bounded = subspace_bounded\n self.grid = grid\n self.vx = vx\n self.vy = vy\n self.vcounts = vcounts\n\n def vector(self, weightx, weighty, size=32):\n counts = self.subspace_bounded.gridded_by_histogram(size=size)\n vx = self.subspace_bounded.gridded_by_histogram(size=size, weight=weightx)\n vy = self.subspace_bounded.gridded_by_histogram(size=size, weight=weighty)\n return SubspaceGridded(self.subspace_bounded, self.grid, vx=vx, vy=vy, vcounts=counts)\n\n def filter_gaussian(self, sigmas=1):\n import scipy.ndimage\n return SubspaceGridded(self.subspace_bounded, scipy.ndimage.filters.gaussian_filter(self.grid, sigmas))\n\n def clip_relative(self, v1, v2):\n vmin = self.grid.min()\n vmax = self.grid.max()\n width = vmax - vmin\n return SubspaceGridded(self.subspace_bounded, np.clip(self.grid, vmin + v1 * width, vmin + v2 * width))\n\n def volr(self, **kwargs):\n import vaex.notebook\n return vaex.notebook.volr(subspace_gridded=self, **kwargs)\n\n def plot(self, axes=None, **kwargs):\n self.subspace_bounded.subspace.plot(np.log1p(self.grid), limits=self.subspace_bounded.bounds, axes=axes, **kwargs)\n\n def mean_line(self, axis=0, **kwargs):\n from matplotlib import pylab\n assert axis in [0, 1]\n other_axis = 0 if axis == 1 else 1\n xmin, xmax = self.subspace_bounded.bounds[axis]\n ymin, ymax = self.subspace_bounded.bounds[other_axis]\n x = vaex.utils.linspace_centers(xmin, xmax, self.grid.shape[axis])\n y = vaex.utils.linspace_centers(ymin, ymax, self.grid.shape[other_axis])\n print(y)\n if axis == 0:\n counts = np.sum(self.grid, axis=axis)\n means = np.sum(self.grid * y[np.newaxis, :].T, axis=axis) / counts\n else:\n counts = np.sum(self.grid, axis=axis)\n means = np.sum(self.grid * y[:, np.newaxis].T, 
axis=axis) / counts\n if axis == 0:\n result = pylab.plot(x, means, **kwargs)\n else:\n result = pylab.plot(means, x, **kwargs)\n\n self.subspace_bounded.lim()\n return result, x, means\n\n def _repr_png_(self):\n from matplotlib import pylab\n fig, ax = pylab.subplots()\n self.plot(axes=ax, f=np.log1p)\n import vaex.utils\n if all([k is not None for k in [self.vx, self.vy, self.vcounts]]):\n N = self.vx.grid.shape[0]\n bounds = self.subspace_bounded.bounds\n print(bounds)\n positions = [vaex.utils.linspace_centers(bounds[i][0], bounds[i][1], N) for i in range(self.subspace_bounded.subspace.dimension)]\n print(positions)\n mask = self.vcounts.grid > 0\n vx = np.zeros_like(self.vx.grid)\n vy = np.zeros_like(self.vy.grid)\n vx[mask] = self.vx.grid[mask] / self.vcounts.grid[mask]\n vy[mask] = self.vy.grid[mask] / self.vcounts.grid[mask]\n # vx = self.vx.grid / self.vcounts.grid\n # vy = self.vy.grid / self.vcounts.grid\n x2d, y2d = np.meshgrid(positions[0], positions[1])\n ax.quiver(x2d[mask], y2d[mask], vx[mask], vy[mask])\n # print x2d\n # print y2d\n # print vx\n # print vy\n # ax.quiver(x2d, y2d, vx, vy)\n ax.title.set_text(r\"$\\log(1+counts)$\")\n ax.set_xlabel(self.subspace_bounded.subspace.expressions[0])\n ax.set_ylabel(self.subspace_bounded.subspace.expressions[1])\n # pylab.savefig\n # from .io import StringIO\n from six import StringIO\n file_object = StringIO()\n fig.canvas.print_png(file_object)\n pylab.close(fig)\n return file_object.getvalue()\n\n def cube_png(self, f=np.log1p, colormap=\"afmhot\", file=\"cube.png\"):\n if self.grid.shape != ((128,) * 3):\n logger.error(\"only 128**3 cubes are supported\")\n return None\n colormap_name = \"afmhot\"\n import matplotlib.cm\n colormap = matplotlib.cm.get_cmap(colormap_name)\n mapping = matplotlib.cm.ScalarMappable(cmap=colormap)\n # pixmap = QtGui.QPixmap(32*2, 32)\n data = np.zeros((128 * 8, 128 * 16, 4), dtype=np.uint8)\n\n # mi, ma = 1*10**self.mod1, self.data3d.max()*10**self.mod2\n grid = f(self.grid)\n vmin, vmax = grid.min(), grid.max()\n grid_normalized = (grid - vmin) / (vmax - vmin)\n # intensity_normalized = (np.log(self.data3d + 1.) 
- np.log(mi)) / (np.log(ma) - np.log(mi));\n import PIL.Image\n for y2d in range(8):\n for x2d in range(16):\n zindex = x2d + y2d * 16\n I = grid_normalized[zindex]\n rgba = mapping.to_rgba(I, bytes=True) # .reshape(Nx, 4)\n # print rgba.shape\n subdata = data[y2d * 128:(y2d + 1) * 128, x2d * 128:(x2d + 1) * 128]\n for i in range(3):\n subdata[:, :, i] = rgba[:, :, i]\n subdata[:, :, 3] = (grid_normalized[zindex] * 255).astype(np.uint8) # * 0 + 255\n if 0:\n filename = \"cube%03d.png\" % zindex\n img = PIL.Image.frombuffer(\"RGB\", (128, 128), subdata[:, :, 0:3] * 1)\n print((\"saving to\", filename))\n img.save(filename)\n img = PIL.Image.frombuffer(\"RGBA\", (128 * 16, 128 * 8), data, 'raw') # , \"RGBA\", 0, -1)\n # filename = \"cube.png\"\n # print \"saving to\", file\n img.save(file, \"png\")\n\n if 0:\n filename = \"colormap.png\"\n print((\"saving to\", filename))\n height, width = self.colormap_data.shape[:2]\n img = PIL.Image.frombuffer(\"RGB\", (width, height), self.colormap_data)\n img.save(filename)\n\n\nclass SubspaceBounded(object):\n def __init__(self, subspace, bounds):\n self.subspace = subspace\n self.bounds = bounds\n\n def histogram(self, size=256, weight=None):\n return self.subspace.histogram(limits=self.bounds, size=size, weight=weight)\n\n def gridded(self, size=256, weight=None):\n return self.gridded_by_histogram(size=size, weight=weight)\n\n def gridded_by_histogram(self, size=256, weight=None):\n grid = self.histogram(size=size, weight=weight)\n return SubspaceGridded(self, grid)\n\n def lim(self):\n from matplotlib import pylab\n xmin, xmax = self.bounds[0]\n ymin, ymax = self.bounds[1]\n pylab.xlim(xmin, xmax)\n pylab.ylim(ymin, ymax)\n\n\nclass Subspaces(object):\n \"\"\"\n :type: subspaces: list[Subspace]\n\n \"\"\"\n\n def __init__(self, subspaces):\n self.subspaces = subspaces\n self.expressions = set()\n first_subspace = self.subspaces[0]\n self.delay = first_subspace.delay\n self.dimension = first_subspace.dimension\n self.df = self.subspaces[0].df\n for subspace in self.subspaces:\n assert subspace.df == self.subspaces[0].df\n assert subspace.delay == self.subspaces[0].delay\n assert subspace.dimension == self.subspaces[0].dimension, \"subspace is of dimension %s, while first subspace if of dimension %s\" % (subspace.dimension, self.subspaces[0].dimension)\n # assert subspace.sele== self.subspaces[0].delay\n self.expressions.update(subspace.expressions)\n self.expressions = list(self.expressions)\n self.subspace = self.df(*list(self.expressions), delay=self.delay, executor=first_subspace.executor)\n\n # def _repr_html_(self):\n\n def __len__(self):\n return len(self.subspaces)\n\n def names(self, seperator=\" \"):\n return [seperator.join(subspace.expressions) for subspace in self.subspaces]\n\n def expressions_list(self):\n return [subspace.expressions for subspace in self.subspaces]\n\n def selected(self):\n return Subspaces([subspace.selected() for subspace in self.subspaces])\n\n def _unpack(self, values):\n value_map = dict(zip(self.expressions, values))\n return [[value_map[ex] for ex in subspace.expressions] for subspace in self.subspaces]\n\n def _pack(self, values):\n value_map = {}\n for subspace_values, subspace in zip(values, self.subspaces):\n for value, expression in zip(subspace_values, subspace.expressions):\n if expression in value_map:\n if isinstance(value, np.ndarray):\n assert np.all(value_map[expression] == value), \"inconsistency in subspaces, value for expression %r is %r in one case, and %r in the other\" % (expression, value, 
value_map[expression])\n else:\n assert value_map[expression] == value, \"inconsistency in subspaces, value for expression %r is %r in one case, and %r in the other\" % (expression, value, value_map[expression])\n else:\n value_map[expression] = value\n return [value_map[expression] for expression in self.expressions]\n\n def minmax(self):\n if self.delay:\n return self.subspace.minmax().then(self._unpack)\n else:\n return self._unpack(self.subspace.minmax())\n\n def limits_sigma(self, sigmas=3, square=False):\n if self.delay:\n return self.subspace.limits_sigma(sigmas=sigmas, square=square).then(self._unpack)\n else:\n return self._unpack(self.subspace.limits_sigma(sigmas=sigmas, square=square))\n\n def mutual_information(self, limits=None, size=256):\n if limits is not None:\n limits = self._pack(limits)\n\n def mutual_information(limits):\n return vaex.promise.listPromise([vaex.promise.Promise.fulfilled(subspace.mutual_information(subspace_limits, size=size)) for subspace_limits, subspace in zip(limits, self.subspaces)])\n # return histograms\n if limits is None:\n limits_promise = vaex.promise.Promise.fulfilled(self.subspace.minmax())\n else:\n limits_promise = vaex.promise.Promise.fulfilled(limits)\n limits_promise = limits_promise.then(self._unpack)\n promise = limits_promise.then(mutual_information)\n return promise if self.delay else promise.get()\n\n def mean(self):\n if self.delay:\n return self.subspace.mean().then(self._unpack)\n else:\n means = self.subspace.mean()\n return self._unpack(means)\n\n def var(self, means=None):\n # 'pack' means, and check if it makes sence\n if means is not None:\n means = self._pack(means)\n\n def var(means):\n return self.subspace.var(means=means)\n if self.delay:\n # if means is None:\n # return self.subspace.mean().then(var).then(self._unpack)\n # else:\n return var(means).then(self._unpack)\n else:\n # if means is None:\n # means = self.subspace.mean()\n # logger.debug(\"means: %r\", means)\n return self._unpack(var(means=means))\n\n def correlation(self, means=None, vars=None):\n def var(means):\n return self.subspace.var(means=means)\n\n def correlation(means_and_vars):\n means, vars = means_and_vars\n means, vars = self._unpack(means), self._unpack(vars)\n # return self.subspace.correlation(means=means, vars=vars)\n return vaex.promise.listPromise([subspace.correlation(means=subspace_mean, vars=subspace_var) for subspace_mean, subspace_var, subspace in zip(means, vars, self.subspaces)])\n if means is not None:\n means = self._pack(means)\n if vars is not None:\n vars = self._pack(vars)\n if self.delay:\n if means is None:\n mean_promise = self.subspace.mean()\n else:\n mean_promise = vaex.promise.Promise.fulfilled(means)\n if vars is None:\n var_promise = mean_promise.then(var)\n else:\n var_promise = vaex.promise.Promise.fulfilled(vars)\n mean_and_var_calculated = vaex.promise.listPromise(mean_promise, var_promise)\n return mean_and_var_calculated.then(correlation)\n else:\n if means is None:\n means = self.subspace.mean()\n if vars is None:\n vars = self.subspace.var(means=means)\n means = self._unpack(means)\n vars = self._unpack(vars)\n return [subspace.correlation(means=subspace_mean, vars=subspace_var) for subspace_mean, subspace_var, subspace in zip(means, vars, self.subspaces)]\n # return correlation((means, vars))\n\n # def bounded_by(self, limits_list):\n # return SubspacesBounded(SubspaceBounded(subspace, limits) for subspace, limit in zip(self.subspaces, limits_list))\n\n\nclass Subspace(object):\n \"\"\"A Subspace represent a 
subset of columns or expressions from a df.\n\n subspace are not instantiated directly, but by 'calling' the df like this:\n\n >>> subspace_xy = some_df(\"x\", \"y\")\n >>> subspace_r = some_df(\"sqrt(x**2+y**2)\")\n\n See `vaex.df.Dataset` for more documentation.\n\n \"\"\"\n\n def __init__(self, df, expressions, executor, delay, masked=False):\n \"\"\"\n\n :param Dataset df: the df the subspace refers to\n :param list[str] expressions: list of expressions that forms the subspace\n :param Executor executor: responsible for executing the tasks\n :param bool delay: return answers directly, or as a promise\n :param bool masked: work on the selection or not\n :return:\n \"\"\"\n self.df = df\n self.expressions = expressions\n self.executor = executor\n self.delay = delay\n self.is_masked = masked\n\n def __repr__(self):\n name = self.__class__.__module__ + \".\" + self.__class__.__name__\n return \"<%s(df=%r, expressions=%r, delay=%r, is_masked=%r)> instance at 0x%x\" % (name, self.df, self.expressions, self.delay, self.is_masked, id(self))\n\n @property\n def dimension(self):\n return len(self.expressions)\n\n def get_selection(self):\n return self.df.get_selection(\"default\") if self.is_masked else None\n\n def is_selected(self):\n return self.is_masked\n\n def selected(self):\n return self.__class__(self.df, expressions=self.expressions, executor=self.executor, delay=self.delay, masked=True)\n\n def delayhronous(self):\n return self.__class__(self.df, expressions=self.expressions, executor=self.executor, delay=True, masked=self.is_masked)\n\n def image_rgba_save(self, filename, data=None, rgba8=None, **kwargs):\n if rgba8 is not None:\n data = self.image_rgba_data(rgba8=rgba8, **kwargs)\n if data is None:\n data = self.image_rgba_data(**kwargs)\n with open(filename, \"wb\") as f:\n f.write(data)\n\n def image_rgba_notebook(self, data=None, rgba8=None, **kwargs):\n if rgba8 is not None:\n data = self.image_rgba_data(rgba8=rgba8, **kwargs)\n if data is None:\n data = self.image_rgba_data(**kwargs)\n from IPython.display import display, Image\n return Image(data=data)\n\n def image_rgba_data(self, rgba8=None, format=\"png\", pil_draw=False, **kwargs):\n import PIL.Image\n import PIL.ImageDraw\n from six import StringIO\n if rgba8 is None:\n rgba8 = self.image_rgba(**kwargs)\n img = PIL.Image.frombuffer(\"RGBA\", rgba8.shape[:2], rgba8, 'raw') # , \"RGBA\", 0, -1)\n if pil_draw:\n draw = PIL.ImageDraw.Draw(img)\n pil_draw(draw)\n\n f = StringIO()\n img.save(f, format)\n return f.getvalue()\n\n def image_rgba_url(self, rgba8=None, **kwargs):\n if rgba8 is None:\n rgba8 = self.image_rgba(**kwargs)\n import PIL.Image\n img = PIL.Image.frombuffer(\"RGBA\", rgba8.shape[:2], rgba8, 'raw') # , \"RGBA\", 0, -1)\n from six import StringIO\n f = StringIO()\n img.save(f, \"png\")\n from base64 import b64encode\n imgurl = \"data:image/png;base64,\" + b64encode(f.getvalue()) + \"\"\n return imgurl\n\n def normalize_grid(self, grid):\n grid = grid * 1 # copy\n mask = (grid > 0) & np.isfinite(grid)\n if grid.sum():\n grid -= grid[mask].min()\n grid /= grid[mask].max()\n else:\n grid[:] = 0\n return grid\n\n def limits(self, value, square=False):\n \"\"\"TODO: doc + server side implementation\"\"\"\n if isinstance(value, six.string_types):\n import re\n match = re.match(r\"(\\d*)(\\D*)\", value)\n if match is None:\n raise ValueError(\"do not understand limit specifier %r, examples are 90%, 3sigma\")\n else:\n value, type = match.groups()\n import ast\n value = ast.literal_eval(value)\n type = type.strip()\n 
if type in [\"s\", \"sigma\"]:\n return self.limits_sigma(value)\n elif type in [\"ss\", \"sigmasquare\"]:\n return self.limits_sigma(value, square=True)\n elif type in [\"%\", \"percent\"]:\n return self.limits_percentage(value)\n elif type in [\"%s\", \"%square\", \"percentsquare\"]:\n return self.limits_percentage(value, square=True)\n if value is None:\n return self.limits_percentage(square=square)\n else:\n return value\n\n def image_rgba(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, weight_stat=\"mean\", figsize=None,\n aspect=\"auto\", f=lambda x: x, axes=None, xlabel=None, ylabel=None,\n group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=10, cmap=\"afmhot\",\n vmin=None, vmax=None,\n pre_blend=False, background_color=\"white\", background_alpha=1., normalize=True, color=None):\n f = _parse_f(f)\n if grid is None:\n limits = self.limits(limits)\n if limits is None:\n limits = self.limits_sigma()\n if group_limits is None and group_by:\n group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)\n if weight_stat == \"mean\" and weight is not None:\n grid = self.bin_mean(weight, limits=limits, size=size, group_limits=group_limits, group_by=group_by)\n else:\n grid = self.histogram(limits=limits, size=size, weight=weight, group_limits=group_limits, group_by=group_by)\n if grid is None: # cancel occured\n return\n import matplotlib.cm\n background_color = np.array(matplotlib.colors.colorConverter.to_rgb(background_color))\n if group_by:\n gmin, gmax, group_count = group_limits\n if isinstance(group_colors, six.string_types):\n group_colors = matplotlib.cm.get_cmap(group_colors)\n if isinstance(group_colors, matplotlib.colors.Colormap):\n group_count = group_limits[2]\n colors = [group_colors(k / float(group_count - 1.)) for k in range(group_count)]\n else:\n colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]\n total = np.sum(grid, axis=0).T\n # grid /= total\n mask = total > 0\n alpha = total - total[mask].min()\n alpha[~mask] = 0\n alpha = total / alpha.max()\n rgba = grid.T.dot(colors)\n\n def _norm(data):\n mask = np.isfinite(data)\n data = data - data[mask].min()\n data /= data[mask].max()\n return data\n rgba[..., 3] = (f(alpha))\n # rgba[...,3] = 1\n rgba[total == 0, 3] = 0.\n mask = alpha > 0\n if 1:\n for i in range(3):\n rgba[..., i] /= total\n # rgba[...,i] /= rgba[...,0:3].max()\n rgba[~mask, i] = background_color[i]\n rgba = (np.swapaxes(rgba, 0, 1))\n else:\n if color:\n color = np.array(matplotlib.colors.colorConverter.to_rgba(color))\n rgba = np.zeros(grid.shape + (4,))\n rgba[..., 0:4] = color\n data = f(grid)\n mask = (grid > 0) & np.isfinite(data)\n if vmin is None:\n vmin = data[mask].min()\n if vmax is None:\n vmax = data[mask].max()\n if mask.sum():\n data -= vmin\n data /= vmax\n data[~mask] = 0\n else:\n data[:] = 0\n rgba[..., 3] = data\n else:\n cmap = matplotlib.cm.get_cmap(cmap)\n data = f(grid)\n if normalize:\n mask = (data > 0) & np.isfinite(data)\n if vmin is None:\n vmin = data[mask].min()\n if vmax is None:\n vmax = data[mask].max()\n if mask.sum():\n data -= vmin\n data /= vmax\n else:\n data[:] = 0\n data[~mask] = 0\n data = np.clip(data, 0, 1)\n rgba = cmap(data)\n if normalize:\n rgba[~mask, 3] = 0\n rgba[..., 3] = 1 # data\n # rgba8 = np.swapaxes(rgba8, 0, 1)\n # white = np.ones_like(rgba[...,0:3])\n if pre_blend:\n # rgba[...,3] = background_alpha\n rgb = rgba[..., :3].T\n alpha = rgba[..., 3].T\n rgb[:] = rgb * alpha + 
background_color[:3].reshape(3, 1, 1) * (1 - alpha)\n alpha[:] = alpha + background_alpha * (1 - alpha)\n rgba = np.clip(rgba, 0, 1)\n rgba8 = (rgba * 255).astype(np.uint8)\n return rgba8\n\n def plot_vectors(self, expression_x, expression_y, limits, wx=None, wy=None, counts=None, size=32, axes=None, **kwargs):\n import pylab\n # refactor: should go to bin_means_xy\n if counts is None:\n counts = self.histogram(size=size, limits=limits)\n if wx is None:\n wx = self.histogram(size=size, weight=expression_x, limits=limits)\n if wy is None:\n wy = self.histogram(size=size, weight=expression_y, limits=limits)\n N = size\n positions = [vaex.utils.linspace_centers(limits[i][0], limits[i][1], N) for i in range(self.dimension)]\n # print(positions)\n mask = counts > 0\n vx = wx / counts\n vy = wy / counts\n vx[counts == 0] = 0\n vy[counts == 0] = 0\n # vx = self.vx.grid / self.vcounts.grid\n # vy = self.vy.grid / self.vcounts.grid\n x2d, y2d = np.meshgrid(positions[0], positions[1])\n if axes is None:\n axes = pylab.gca()\n axes.quiver(x2d[mask], y2d[mask], vx[mask], vy[mask], **kwargs)\n\n def plot(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, weight_stat=\"mean\", figsize=None,\n aspect=\"auto\", f=\"identity\", axes=None, xlabel=None, ylabel=None,\n group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=None,\n vmin=None, vmax=None,\n cmap=\"afmhot\",\n **kwargs):\n \"\"\"Plot the subspace using sane defaults to get a quick look at the data.\n\n :param grid: A 2d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram\n :param size: Passed to Subspace.histogram\n :param limits: Limits for the subspace in the form [[xmin, xmax], [ymin, ymax]], if None it will be calculated using Subspace.limits_sigma\n :param square: argument passed to Subspace.limits_sigma\n :param Executor executor: responsible for executing the tasks\n :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size\n :param aspect: Passed to matplotlib's axes.set_aspect\n :param xlabel: String for label on x axis (may contain latex)\n :param ylabel: Same for y axis\n :param kwargs: extra argument passed to axes.imshow, useful for setting the colormap for instance, e.g. 
cmap='afmhot'\n :return: matplotlib.image.AxesImage\n\n \"\"\"\n import pylab\n f = _parse_f(f)\n limits = self.limits(limits)\n if limits is None:\n limits = self.limits_sigma()\n # if grid is None:\n if group_limits is None and group_by:\n group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)\n # grid = self.histogram(limits=limits, size=size, weight=weight, group_limits=group_limits, group_by=group_by)\n if figsize is not None:\n pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')\n if axes is None:\n axes = pylab.gca()\n fig = pylab.gcf()\n # if xlabel:\n pylab.xlabel(xlabel or self.expressions[0])\n # if ylabel:\n pylab.ylabel(ylabel or self.expressions[1])\n # axes.set_aspect(aspect)\n rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square, center=center, weight=weight, weight_stat=weight_stat,\n f=f, axes=axes,\n group_by=group_by, group_limits=group_limits, group_colors=group_colors, group_count=group_count,\n vmin=vmin, vmax=vmax,\n cmap=cmap)\n import matplotlib\n if group_by:\n if isinstance(group_colors, six.string_types):\n group_colors = matplotlib.cm.get_cmap(group_colors)\n if isinstance(group_colors, matplotlib.colors.Colormap):\n group_count = group_limits[2]\n colors = [group_colors(k / float(group_count - 1.)) for k in range(group_count)]\n else:\n colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]\n colormap = matplotlib.colors.ListedColormap(colors)\n gmin, gmax, group_count = group_limits # [:2]\n delta = (gmax - gmin) / (group_count - 1.)\n norm = matplotlib.colors.Normalize(gmin - delta / 2, gmax + delta / 2)\n sm = matplotlib.cm.ScalarMappable(norm, colormap)\n sm.set_array(1) # make matplotlib happy (strange behavious)\n colorbar = fig.colorbar(sm)\n if group_labels:\n colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))\n colorbar.set_ticklabels(group_labels)\n else:\n colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))\n colorbar.set_ticklabels(map(lambda x: \"%f\" % x, np.arange(gmin, gmax + delta / 2, delta)))\n colorbar.ax.set_ylabel(group_by)\n # matplotlib.colorbar.ColorbarBase(axes, norm=norm, cmap=colormap)\n im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin=\"lower\", aspect=aspect, **kwargs)\n else:\n norm = matplotlib.colors.Normalize(0, 23)\n sm = matplotlib.cm.ScalarMappable(norm, cmap)\n sm.set_array(1) # make matplotlib happy (strange behavious)\n colorbar = fig.colorbar(sm)\n im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin=\"lower\", aspect=aspect, **kwargs)\n colorbar = None\n return im, colorbar\n\n def plot1d(self, grid=None, size=64, limits=None, weight=None, figsize=None, f=\"identity\", axes=None, xlabel=None, ylabel=None, **kwargs):\n \"\"\"Plot the subspace using sane defaults to get a quick look at the data.\n\n :param grid: A 2d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram\n :param size: Passed to Subspace.histogram\n :param limits: Limits for the subspace in the form [[xmin, xmax], [ymin, ymax]], if None it will be calculated using Subspace.limits_sigma\n :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size\n :param xlabel: String for label on x axis (may contain latex)\n :param ylabel: Same for y axis\n :param kwargs: extra argument passed to ...,\n\n \"\"\"\n import pylab\n f = _parse_f(f)\n limits = self.limits(limits)\n assert self.dimension == 1, \"can only plot 1d, not %s\" % self.dimension\n if limits is 
None:\n limits = self.limits_sigma()\n if grid is None:\n grid = self.histogram(limits=limits, size=size, weight=weight)\n if figsize is not None:\n pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')\n if axes is None:\n axes = pylab.gca()\n # if xlabel:\n pylab.xlabel(xlabel or self.expressions[0])\n # if ylabel:\n # pylab.ylabel(ylabel or self.expressions[1])\n pylab.ylabel(\"counts\" or ylabel)\n # axes.set_aspect(aspect)\n N = len(grid)\n xmin, xmax = limits[0]\n return pylab.plot(np.arange(N) / (N - 1.0) * (xmax - xmin) + xmin, f(grid,), drawstyle=\"steps\", **kwargs)\n # pylab.ylim(-1, 6)\n\n def plot_histogram_bq(self, f=\"identity\", size=64, limits=None, color=\"red\", bq_cleanup=True):\n import vaex.ext.bqplot\n limits = self.limits(limits)\n plot = vaex.ext.bqplot.BqplotHistogram(self, color, size, limits)\n if not hasattr(self, \"_bqplot\"):\n self._bqplot = {}\n self._bqplot[\"cleanups\"] = []\n else:\n if bq_cleanup:\n for cleanup in self._bqplot[\"cleanups\"]:\n cleanup()\n self._bqplot[\"cleanups\"] = []\n\n def cleanup(callback=plot.callback):\n self.df.signal_selection_changed.disconnect(callback=callback)\n self._bqplot[\"cleanups\"].append(cleanup)\n\n return plot\n\n def plot_bq(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, figsize=None,\n aspect=\"auto\", f=\"identity\", fig=None, axes=None, xlabel=None, ylabel=None, title=None,\n group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=None,\n cmap=\"afmhot\", scales=None, tool_select=False, bq_cleanup=True,\n **kwargs):\n import vaex.ext.bqplot\n import bqplot.interacts\n import bqplot.pyplot as p\n import ipywidgets as widgets\n import bqplot as bq\n f = _parse_f(f)\n limits = self.limits(limits)\n import vaex.ext.bqplot\n vaex.ext.bqplot.patch()\n if not hasattr(self, \"_bqplot\"):\n self._bqplot = {}\n self._bqplot[\"cleanups\"] = []\n else:\n if bq_cleanup:\n for cleanup in self._bqplot[\"cleanups\"]:\n cleanup()\n self._bqplot[\"cleanups\"] = []\n if limits is None:\n limits = self.limits_sigma()\n # if fig is None:\n if scales is None:\n x_scale = bq.LinearScale(min=limits[0][0], max=limits[0][1])\n y_scale = bq.LinearScale(min=limits[1][0], max=limits[1][1])\n scales = {'x': x_scale, 'y': y_scale}\n else:\n x_scale = scales[\"x\"]\n y_scale = scales[\"y\"]\n if 1:\n fig = p.figure() # actually, bqplot doesn't return it\n fig = p.current_figure()\n fig.fig_color = \"black\" # TODO, take the color from the colormap\n fig.padding_y = 0\n # if we don't do this, bqplot may flip some axes... 
report this bug\n x = np.arange(10)\n y = x**2\n p.plot(x, y, scales=scales)\n # p.xlim(*limits[0])\n # p.ylim(*limits[1])\n # if grid is None:\n if group_limits is None and group_by:\n group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)\n # fig = p.\n # if xlabel:\n fig.axes[0].label = xlabel or self.expressions[0]\n # if ylabel:\n fig.axes[1].label = ylabel or self.expressions[1]\n if title:\n fig.title = title\n # axes.set_aspect(aspect)\n rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square, center=center, weight=weight,\n f=f, axes=axes,\n group_by=group_by, group_limits=group_limits, group_colors=group_colors, group_count=group_count,\n cmap=cmap)\n # x_scale = p._context[\"scales\"][\"x\"]\n # y_scale = p._context[\"scales\"][\"y\"]\n src = \"http://localhost:8888/kernelspecs/python2/logo-64x64.png\"\n import bqplot.marks\n im = vaex.ext.bqplot.Image(src=src, scales=scales, x=0, y=0, width=1, height=1)\n if 0:\n size = 20\n x_data = np.arange(size)\n line = bq.Lines(x=x_data, y=np.random.randn(size), scales={'x': x_scale, 'y': y_scale},\n stroke_width=3, colors=['red'])\n\n ax_x = bq.Axis(scale=x_scale, tick_format='0.2f', grid_lines='solid')\n ax_y = bq.Axis(scale=y_scale, orientation='vertical', tick_format='0.2f', grid_lines='solid')\n panzoom = bq.PanZoom(scales={'x': [x_scale], 'y': [y_scale]})\n lasso = bqplot.interacts.LassoSelector()\n brush = bqplot.interacts.BrushSelector(x_scale=x_scale, y_scale=y_scale, color=\"green\")\n fig = bq.Figure(marks=[line, im], axes=[ax_x, ax_y], min_width=100, min_height=100, interaction=panzoom)\n else:\n fig.marks = list(fig.marks) + [im]\n\n def make_image(executor, limits):\n # print \"make image\" * 100\n self.executor = executor\n if self.df.has_selection():\n sub = self.selected()\n else:\n sub = self\n return sub.image_rgba(limits=limits, size=size, f=f)\n progress = widgets.FloatProgress(value=0.0, min=0.0, max=1.0, step=0.01)\n updater = vaex.ext.bqplot.DebouncedThreadedUpdater(self, size, im, make_image, progress_widget=progress)\n\n def update_image():\n limits = [x_scale.min, x_scale.max], [y_scale.min, y_scale.max]\n # print limits\n # print \"update...\", limits\n # vxbq.debounced_threaded_update(self.df, im, make_image2, limits=limits)\n updater.update(limits)\n\n def update(*args):\n update_image()\n y_scale.observe(update, \"min\")\n y_scale.observe(update, \"max\")\n x_scale.observe(update, \"min\")\n x_scale.observe(update, \"max\")\n update_image()\n # fig = kwargs.pop('figure', p.current_figure())\n tools = []\n tool_actions = []\n panzoom = bq.PanZoom(scales={'x': [x_scale], 'y': [y_scale]})\n tool_actions_map = {u\"m\": panzoom}\n tool_actions.append(u\"m\")\n\n fig.interaction = panzoom\n if tool_select:\n brush = bqplot.interacts.BrushSelector(x_scale=x_scale, y_scale=y_scale, color=\"green\")\n tool_actions_map[\"b\"] = brush\n tool_actions.append(\"b\")\n\n def update_selection(*args):\n def f():\n if brush.selected:\n (x1, y1), (x2, y2) = brush.selected\n ex1, ex2 = self.expressions\n mode = modes_names[modes_labels.index(button_selection_mode.value)]\n self.df.select_rectangle(ex1, ex2, limits=[[x1, x2], [y1, y2]], mode=mode)\n else:\n self.df.select_nothing()\n updater.update_select(f)\n brush.observe(update_selection, \"selected\")\n # fig.interaction = brush\n # callback = self.df.signal_selection_changed.connect(lambda df: update_image())\n callback = self.df.signal_selection_changed.connect(lambda df: updater.update_direct_safe())\n\n def cleanup(callback=callback):\n 
self.df.signal_selection_changed.disconnect(callback=callback)\n self._bqplot[\"cleanups\"].append(cleanup)\n\n button_select_nothing = widgets.Button(icon=\"fa-trash-o\")\n\n def select_nothing(button):\n self.df.select_nothing()\n button_select_nothing.on_click(select_nothing)\n tools.append(button_select_nothing)\n modes_names = \"replace and or xor subtract\".split()\n modes_labels = \"= & | ^ -\".split()\n button_selection_mode = widgets.ToggleButtons(description='', options=modes_labels)\n tools.append(button_selection_mode)\n\n def change_interact(*args):\n # print \"change\", args\n fig.interaction = tool_actions_map[button_action.value]\n # tool_actions = [\"m\", \"b\"]\n # tool_actions = [(\"m\", \"m\"), (\"b\", \"b\")]\n button_action = widgets.ToggleButtons(description='', options=tool_actions, icons=[\"fa-arrows\", \"fa-pencil-square-o\"])\n button_action.observe(change_interact, \"value\")\n tools.insert(0, button_action)\n button_action.value = \"m\" # tool_actions[-1]\n if len(tools) == 1:\n tools = []\n tools = widgets.HBox(tools)\n\n box_layout = widgets.Layout(display='flex',\n flex_flow='column',\n # border='solid',\n width='100%', height=\"100%\")\n fig.fig_margin = {'bottom': 40, 'left': 60, 'right': 10, 'top': 40}\n # fig.min_height = 700\n # fig.min_width = 400\n fig.layout = box_layout\n return widgets.VBox([fig, progress, tools])\n\n def figlarge(self, size=(10, 10)):\n import pylab\n pylab.figure(num=None, figsize=size, dpi=80, facecolor='w', edgecolor='k')\n\n # def bounded(self):\n # return self.bounded_by_minmax()\n\n def bounded_by(self, limits):\n \"\"\"Returns a bounded subspace (SubspaceBounded) with limits as given by limits\n\n :param limits: sequence of [(min, max), ..., (min, max)] values\n :rtype: SubspaceBounded\n \"\"\"\n return SubspaceBounded(self, np.array(limits))\n\n def bounded_by_minmax(self):\n \"\"\"Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.minmax()\n\n :rtype: SubspaceBounded\n \"\"\"\n bounds = self.minmax()\n return SubspaceBounded(self, bounds)\n\n bounded = bounded_by_minmax\n\n def bounded_by_sigmas(self, sigmas=3, square=False):\n \"\"\"Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.limits_sigma()\n\n :rtype: SubspaceBounded\n \"\"\"\n bounds = self.limits_sigma(sigmas=sigmas, square=square)\n return SubspaceBounded(self, bounds)\n\n def minmax(self):\n \"\"\"Return a sequence of [(min, max), ..., (min, max)] corresponding to each expression in this subspace ignoring NaN.\n \"\"\"\n raise NotImplementedError\n\n def mean(self):\n \"\"\"Return a sequence of [mean, ... , mean] corresponding to the mean of each expression in this subspace ignoring NaN.\n \"\"\"\n raise NotImplementedError\n\n def var(self, means=None):\n \"\"\"Return a sequence of [var, ... , var] corresponding to the variance of each expression in this subspace ignoring NaN.\n \"\"\"\n raise NotImplementedError\n\n def sum(self):\n \"\"\"Return a sequence of [sum, ... 
, sum] corresponding to the sum of values of each expression in this subspace ignoring NaN.\"\"\"\n raise NotImplementedError\n\n def histogram(self, limits, size=256, weight=None):\n \"\"\"Return a grid of shape (size, ..., size) corresponding to the dimensionality of this subspace containing the counts in each element\n\n The type of the grid of np.float64\n\n \"\"\"\n raise NotImplementedError\n\n def limits_sigma(self, sigmas=3, square=False):\n raise NotImplementedError\n\n def row(self, index):\n return np.array([self.df.evaluate(expression, i1=index, i2=index + 1)[0] for expression in self.expressions])\n\n\nclass SubspaceLocal(Subspace):\n \"\"\"Subclass of subspace which implemented methods that can be run locally.\n \"\"\"\n\n def _toarray(self, list):\n return np.array(list)\n\n @property\n def pre(self):\n self.executor.pre\n\n @property\n def post(self):\n self.executor.post\n\n def _task(self, task, progressbar=False):\n \"\"\"Helper function for returning tasks results, result when immediate is True, otherwise the task itself, which is a promise\"\"\"\n if self.delay:\n # should return a task or a promise nesting it\n return self.executor.schedule(task)\n else:\n import vaex.utils\n callback = None\n try:\n if progressbar == True:\n def update(fraction):\n bar.update(fraction)\n return True\n bar = vaex.utils.progressbar(task.name)\n callback = self.executor.signal_progress.connect(update)\n elif progressbar:\n callback = self.executor.signal_progress.connect(progressbar)\n result = self.executor.run(task)\n if progressbar == True:\n bar.finish()\n sys.stdout.write('\\n')\n return result\n finally:\n if callback:\n self.executor.signal_progress.disconnect(callback)\n\n def minmax(self, progressbar=False):\n def min_max_reduce(minmax1, minmax2):\n if minmax1 is None:\n return minmax2\n if minmax2 is None:\n return minmax1\n result = []\n for d in range(self.dimension):\n min1, max1 = minmax1[d]\n min2, max2 = minmax2[d]\n result.append((min(min1, min2), max(max1, max2)))\n return result\n\n def min_max_map(thread_index, i1, i2, *blocks):\n if self.is_masked or self.df.filtered:\n mask = self.df.evaluate_selection_mask(\"default\" if self.is_masked else None, i1=i1, i2=i2)\n blocks = [block[mask] for block in blocks]\n is_empty = all(~mask)\n if is_empty:\n return None\n # with lock:\n # print blocks\n # with lock:\n # print thread_index, i1, i2, blocks\n blocks = [_asfloat(block) for block in blocks]\n return [vaex.vaexfast.find_nan_min_max(block) for block in blocks]\n if 0: # TODO: implement using statisticNd and benchmark\n minmaxes = np.zeros((len(blocks), 2), dtype=float)\n minmaxes[:, 0] = np.inf\n minmaxes[:, 1] = -np.inf\n for i, block in enumerate(blocks):\n vaex.vaexfast.statisticNd([], block, minmaxes[i, :], [], [], 2)\n # minmaxes[~np.isfinite(minmaxes)] = np.nan\n return minmaxes\n task = TaskMapReduceLegacy(self.df, self.expressions, min_max_map, min_max_reduce, self._toarray, info=True, name=\"minmax\")\n return self._task(task, progressbar=progressbar)\n\n def mean(self):\n return self._moment(1)\n\n def _moment(self, moment=1):\n def mean_reduce(means_and_counts1, means_and_counts2):\n means_and_counts = []\n for (mean1, count1), (mean2, count2) in zip(means_and_counts1, means_and_counts2):\n means_and_counts.append([np.nansum([mean1 * count1, mean2 * count2]) / (count1 + count2), count1 + count2])\n return means_and_counts\n\n def remove_counts(means_and_counts):\n return self._toarray(means_and_counts)[:, 0]\n\n def mean_map(thread_index, i1, i2, 
*blocks):\n if self.is_masked or self.df.filtered:\n mask = self.df.evaluate_selection_mask(\"default\" if self.is_masked else None, i1=i1, i2=i2)\n return [(np.nanmean(block[mask]**moment), np.count_nonzero(~np.isnan(block[mask]))) for block in blocks]\n else:\n return [(np.nanmean(block**moment), np.count_nonzero(~np.isnan(block))) for block in blocks]\n task = TaskMapReduceLegacy(self.df, self.expressions, mean_map, mean_reduce, remove_counts, info=True)\n return self._task(task)\n\n def var(self, means=None):\n # variances are linear, use the mean to reduce\n def vars_reduce(vars_and_counts1, vars_and_counts2):\n vars_and_counts = []\n for (var1, count1), (var2, count2) in zip(vars_and_counts1, vars_and_counts2):\n vars_and_counts.append([np.nansum([var1 * count1, var2 * count2]) / (count1 + count2), count1 + count2])\n return vars_and_counts\n\n def remove_counts(vars_and_counts):\n return self._toarray(vars_and_counts)[:, 0]\n if self.is_masked or self.df.filtered:\n def var_map(thread_index, i1, i2, *blocks):\n mask = self.df.evaluate_selection_mask(\"default\" if self.is_masked else None, i1=i1, i2=i2)\n if means is not None:\n return [(np.nanmean((block[mask] - mean)**2), np.count_nonzero(~np.isnan(block[mask]))) for block, mean in zip(blocks, means)]\n else:\n return [(np.nanmean(block[mask]**2), np.count_nonzero(~np.isnan(block[mask]))) for block in blocks]\n task = TaskMapReduceLegacy(self.df, self.expressions, var_map, vars_reduce, remove_counts, info=True)\n else:\n def var_map(*blocks):\n if means is not None:\n return [(np.nanmean((block - mean)**2), np.count_nonzero(~np.isnan(block))) for block, mean in zip(blocks, means)]\n else:\n return [(np.nanmean(block**2), np.count_nonzero(~np.isnan(block))) for block in blocks]\n task = TaskMapReduceLegacy(self.df, self.expressions, var_map, vars_reduce, remove_counts)\n return self._task(task)\n\n def correlation(self, means=None, vars=None):\n if self.dimension != 2:\n raise ValueError(\"correlation is only defined for 2d subspaces, not %dd\" % self.dimension)\n\n def do_correlation(means, vars):\n meanx, meany = means\n sigmax, sigmay = vars[0]**0.5, vars[1]**0.5\n\n def remove_counts_and_normalize(covar_and_count):\n covar, counts = covar_and_count\n return covar / counts / (sigmax * sigmay)\n\n def covars_reduce(covar_and_count1, covar_and_count2):\n if covar_and_count1 is None:\n return covar_and_count2\n if covar_and_count2 is None:\n return covar_and_count1\n else:\n covar1, count1 = covar_and_count1\n covar2, count2 = covar_and_count2\n return [np.nansum([covar1, covar2]), count1 + count2]\n\n mask = self.df.mask\n\n def covar_map(thread_index, i1, i2, *blocks):\n # return [(np.nanmean((block[mask[i1:i2]]-mean)**2), np.count_nonzero(~np.isnan(block[mask[i1:i2]]))) for block, mean in zip(blocks, means)]\n blockx, blocky = blocks\n if self.is_masked:\n blockx, blocky = blockx[mask[i1:i2]], blocky[mask[i1:i2]]\n counts = np.count_nonzero(~(np.isnan(blockx) | np.isnan(blocky)))\n if counts == 0:\n return None\n else:\n return np.nansum((blockx - meanx) * (blocky - meany)), counts\n\n task = TaskMapReduceLegacy(self.df, self.expressions, covar_map, covars_reduce, remove_counts_and_normalize, info=True)\n return self._task(task)\n if means is None:\n if self.delay:\n means_wrapper = [None]\n\n def do_vars(means):\n means_wrapper[0] = means\n return self.var(means)\n\n def do_correlation_wrapper(vars):\n return do_correlation(means_wrapper[0], vars)\n return self.mean().then(do_vars).then(do_correlation_wrapper)\n else:\n means = 
self.mean()\n vars = self.var(means=means)\n return do_correlation(means, vars)\n else:\n if vars is None:\n if self.delay:\n def do_correlation_wrapper(vars):\n return do_correlation(means, vars)\n return self.vars(means=means).then(do_correlation_wrapper)\n else:\n vars = self.var(means)\n return do_correlation(means, vars)\n else:\n if means is None:\n means = self.mean()\n if vars is None:\n vars = self.var(means=means)\n return do_correlation(means, vars)\n\n def sum(self):\n def nansum(x): return np.nansum(x, dtype=np.float64)\n # TODO: we can speed up significantly using our own nansum, probably the same for var and mean\n nansum = vaex.vaexfast.nansum\n if self.is_masked or self.df.filtered:\n task = TaskMapReduceLegacy(self.df,\n self.expressions, lambda thread_index, i1, i2, *blocks: [nansum(block[self.df.evaluate_selection_mask(\"default\" if self.is_masked else None, i1=i1, i2=i2)])\n for block in blocks],\n lambda a, b: np.array(a) + np.array(b), self._toarray, info=True)\n else:\n task = TaskMapReduceLegacy(self.df, self.expressions, lambda *blocks: [nansum(block) for block in blocks], lambda a, b: np.array(a) + np.array(b), self._toarray)\n return self._task(task)\n\n def histogram(self, limits, size=256, weight=None, progressbar=False, group_by=None, group_limits=None):\n expressions = self.expressions\n if group_by:\n expressions = list(expressions) + [group_by]\n limits = list(limits) + [group_limits[:2]] # [[group_limits[0] - 0,5, group_limits[1]+0.5]]\n # assert group_limits[2] == 1\n size = (group_limits[2],) + (size,) * (len(expressions) - 1)\n task = TaskHistogram(self.df, self, expressions, size, limits, masked=self.is_masked, weight=weight)\n return self._task(task, progressbar=progressbar)\n\n def bin_mean(self, expression, limits, size=256, progressbar=False, group_by=None, group_limits=None):\n # todo, fix progressbar into two...\n counts = self.histogram(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits)\n weighted = self.histogram(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,\n weight=expression)\n mean = weighted / counts\n mean[counts == 0] = np.nan\n return mean\n\n def bin_mean_cyclic(self, expression, max_value, limits, size=256, progressbar=False, group_by=None, group_limits=None):\n # todo, fix progressbar into two...\n meanx = self.bin_mean(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,\n expression=\"cos((%s)/%r*2*pi)\" % (expression, max_value))\n meany = self.bin_mean(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,\n expression=\"sin((%s)/%r*2*pi)\" % (expression, max_value))\n angles = np.arctan2(meany, meanx)\n values = ((angles + 2 * np.pi) % (2 * np.pi)) / (2 * np.pi) * max_value\n length = np.sqrt(meanx**2 + meany**2)\n length[~np.isfinite(meanx)] = np.nan\n return values, length\n\n def mutual_information(self, limits=None, grid=None, size=256):\n if limits is None:\n limits_done = Task.fulfilled(self.minmax())\n else:\n limits_done = Task.fulfilled(limits)\n if grid is None:\n if limits is None:\n histogram_done = limits_done.then(lambda limits: self.histogram(limits, size=size))\n else:\n histogram_done = Task.fulfilled(self.histogram(limits, size=size))\n else:\n histogram_done = Task.fulfilled(grid)\n mutual_information_promise = histogram_done.then(vaex.kld.mutual_information)\n return mutual_information_promise if self.delay else 
mutual_information_promise.get()\n\n def limits_percentage(self, percentage=99.73, square=False):\n import scipy.ndimage\n limits = []\n for expr in self.expressions:\n subspace = self.df(expr)\n if self.is_selected():\n subspace = subspace.selected()\n limits_minmax = subspace.minmax()\n vmin, vmax = limits_minmax[0]\n size = 1024 * 16\n counts = subspace.histogram(size=size, limits=limits_minmax)\n cumcounts = np.concatenate([[0], np.cumsum(counts)])\n cumcounts /= cumcounts.max()\n # TODO: this is crude.. see the details!\n f = (1 - percentage / 100.) / 2\n x = np.linspace(vmin, vmax, size + 1)\n l = scipy.interp([f, 1 - f], cumcounts, x)\n limits.append(l)\n return limits\n\n def limits_sigma(self, sigmas=3, square=False):\n if self.delay:\n means_wrapper = [None]\n\n def do_vars(means):\n means_wrapper[0] = means\n return self.var(means)\n\n def do_limits(vars):\n stds = vars**0.5\n means = means_wrapper[0]\n if square:\n stds = np.repeat(stds.mean(), len(stds))\n return np.array(list(zip(means - sigmas * stds, means + sigmas * stds)))\n return self.mean().then(do_vars).then(do_limits)\n else:\n means = self.mean()\n stds = self.var(means=means)**0.5\n if square:\n stds = np.repeat(stds.mean(), len(stds))\n return np.array(list(zip(means - sigmas * stds, means + sigmas * stds)))\n\n def _not_needed_current(self):\n index = self.df.get_current_row()\n\n def find(thread_index, i1, i2, *blocks):\n if (index >= i1) and (index < i2):\n return [block[index - i1] for block in blocks]\n else:\n return None\n task = TaskMapReduceLegacy(self.df, self.expressions, find, lambda a, b: a if b is None else b, info=True)\n return self._task(task)\n\n def nearest(self, point, metric=None):\n metric = metric or [1.] * len(point)\n\n def nearest_in_block(thread_index, i1, i2, *blocks):\n if self.is_masked:\n mask = self.df.evaluate_selection_mask(\"default\", i1=i1, i2=i2)\n if mask.sum() == 0:\n return None\n blocks = [block[mask] for block in blocks]\n distance_squared = np.sum([(blocks[i] - point[i])**2. * metric[i] for i in range(self.dimension)], axis=0)\n min_index_global = min_index = np.argmin(distance_squared)\n if self.is_masked: # we skipped some indices, so correct for that\n min_index_global = np.argmin((np.cumsum(mask) - 1 - min_index)**2)\n # with lock:\n # print i1, i2, min_index, distance_squared, [block[min_index] for block in blocks]\n return min_index_global.item() + i1, distance_squared[min_index].item()**0.5, [block[min_index].item() for block in blocks]\n\n def nearest_reduce(a, b):\n if a is None:\n return b\n if b is None:\n return a\n if a[1] < b[1]:\n return a\n else:\n return b\n if self.is_masked:\n pass\n task = TaskMapReduceLegacy(self.df,\n self.expressions,\n nearest_in_block,\n nearest_reduce, info=True)\n return self._task(task)\n" ]
[ [ "numpy.ma.array", "numpy.array", "numpy.arange" ], [ "numpy.arange" ], [ "matplotlib.cm.ScalarMappable", "numpy.argmin", "numpy.nanmean", "numpy.cumsum", "numpy.zeros_like", "numpy.log1p", "numpy.swapaxes", "numpy.arange", "numpy.sqrt", "numpy.isfinite", "numpy.array", "matplotlib.cm.get_cmap", "numpy.zeros", "numpy.nansum", "numpy.random.randn", "matplotlib.colors.colorConverter.to_rgb", "matplotlib.colors.ListedColormap", "numpy.arctan2", "numpy.clip", "numpy.isnan", "numpy.sum", "matplotlib.colors.colorConverter.to_rgba", "matplotlib.colors.Normalize", "numpy.all", "numpy.linspace", "numpy.meshgrid" ] ]
ALexanderpu/CUDAC-PerformanceEvaluation
[ "1106792a41781b490685941d53bcf5bf43f4ca32" ]
[ "SparkCCM.py" ]
[ "# running under python 2.7 \n__author__ = \"Bo Pu\"\n\nimport sys\nimport ConfigParser\nimport pandas as pd\nfrom pyspark.sql import SparkSession\nimport json\nimport numpy as np\nimport os\n\n# for single L; which will be not used \n# read parameter combinations config and fill into the objects\nclass Sample:\n def __init__(self, _observations, _targets, _e, _tau, _l, _samples, _multil, _genoutput):\n self.observations = _observations\n self.targets = _targets\n self.e = _e\n self.tau = _tau\n self.samples = _samples\n self.l = _l\n self.multil = _multil\n self.genoutput = _genoutput\n\ndef ccm(LArr, EArr, TauArr, num_samples, time_series, x, y, scriptPath, generateOutput):\n observations, targets = time_series[x].tolist(), time_series[y].tolist()\n paras = []\n for l in LArr:\n for e in EArr:\n for tau in TauArr:\n s = Sample(observations, targets, e, tau, l, num_samples, 0, generateOutput)\n para = json.dumps(vars(s))\n #print para\n paras.append(para)\n # start the spark context \n spark = SparkSession.builder.appName(\"PySparkCCM\").getOrCreate()\n paraRdd = spark.sparkContext.parallelize(paras)\n piped = paraRdd.pipe(scriptPath)\n result = piped.collect()\n spark.stop()\n return result\n\n\n# for multi Ls in one task\nclass SampleMultiL:\n def __init__(self, _observations, _targets, _e, _tau, _samples, _lstart, _lend, _linterval, _multil, _genoutput, _outputpath, _gpu):\n self.observations = _observations\n self.targets = _targets\n self.e = _e\n self.tau = _tau\n self.samples = _samples\n self.lstart = _lstart\n self.lend = _lend\n self.linterval = _linterval\n self.multil = _multil\n self.genoutput = _genoutput\n self.outputpath = _outputpath\n self.gpu = _gpu\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"please input the local path of ccm.cfg\")\n sys.exit()\n\n # read input cfg file: the first argument is the file path\n cfgfile = sys.argv[1]\n \n config = ConfigParser.RawConfigParser()\n config.read(cfgfile)\n try:\n input_path = config.get('paths', 'input')\n output_path = config.get('paths', 'output')\n script_path = config.get('paths', 'sparkccmlib')\n\n E = config.get('parameters', 'E')\n Tau = config.get('parameters', 'tau')\n EArr = map(int, E.split(\",\"))\n TauArr = map(int, Tau.split(\",\"))\n\n num_samples = config.getint('parameters', 'num_samples')\n \n LStart = config.getint('parameters', 'LStart')\n LEnd = config.getint('parameters', 'LEnd')\n LInterval = config.getint('parameters', 'LInterval')\n\n xname = config.get('inputs', 'x')\n yname = config.get('inputs', 'y')\n\n time_series = pd.read_csv(input_path)\n observations, targets = time_series[xname].tolist(), time_series[yname].tolist()\n\n GenerateOutputCSV = config.getint('options', 'GenerateOutputCSV')\n GPUAcceleration = config.getint('options', 'GPUAcceleration')\n print(\"GPUAcceleration: \" + str(GPUAcceleration))\n # generate para rdd to separate the tasks to different workers\n paras = []\n for e in EArr:\n for tau in TauArr:\n s = SampleMultiL(observations, targets, e, tau, num_samples, LStart, LEnd, LInterval, 1, GenerateOutputCSV, output_path, GPUAcceleration)\n para = json.dumps(vars(s))\n #print para\n paras.append(para)\n # start the spark context \n \n print(\"size: \" + str(len(paras)))\n \n \n spark = SparkSession.builder.appName(\"PySparkCCMMultiL\").getOrCreate()\n paraRdd = spark.sparkContext.parallelize(paras)\n piped = paraRdd.pipe(script_path)\n result = piped.collect()\n\n for ele in result:\n print(ele)\n\n spark.stop()\n\n # output path in the result\n # with 
open(\"outputcsvpath.out\", \"w\") as f:\n # for record in result:\n # f.write(record)\n except:\n print(\"parsing config file error\")" ]
[ [ "pandas.read_csv" ] ]
justinpayan/StackOverflowNER-NS
[ "8459cee99582e5bddf94fb1dff4fcad5fc93fd54" ]
[ "regularizers.py" ]
[ "import abc\nimport math\nimport torch\nfrom torch.optim import Optimizer, SGD\nfrom settings import args, FILL_VAL, TOKENS_WEIGHT\nfrom utils import get_losses, get_model_dir\nfrom parallel import DataParallelCriterion\nfrom torch.nn import CrossEntropyLoss, MSELoss\nimport pickle as pkl\nimport os\nfrom torch.nn.functional import softmax\n\n\nclass Regularizer(abc.ABC):\n def __init__(self, model, parallel_model, dataloaders, task, prev_task=None):\n self.model = model\n self.parallel_model = parallel_model\n self.dataloaders = dataloaders\n self.task = task\n self.prev_task = prev_task\n @abc.abstractmethod\n def task_start_do(self):\n return NotImplemented\n @abc.abstractmethod\n def task_end_do(self):\n return NotImplemented\n def save_reg_params(self):\n model_dir = get_model_dir([self.task])\n reg_params_path = os.path.join(model_dir, \"reg_params.pkl\")\n with open(reg_params_path, 'wb') as f:\n pkl.dump(self.model.reg_params,f)\n def load_reg_params(self):\n if self.prev_task:\n model_dir = get_model_dir([self.prev_task])\n reg_params_path = os.path.join(model_dir, \"reg_params.pkl\")\n with open(reg_params_path, 'rb') as f:\n self.model.reg_params = pkl.load(f)\n input()\n\n\nclass MAS(Regularizer):\n def task_start_do(self,freeze_layers=[]):\n #self.load_reg_params()\n task_start_do(self.model, freeze_layers)\n def task_end_do(self):\n updater = Omega_update(self.model.parameters(), lr=0.0001, momentum=0.9)\n compute_importance(self.model, self.parallel_model, updater, self.dataloaders)\n accumulate_reg_params(self.model)\n self.save_reg_params()\n\nclass EWC(Regularizer):\n def task_start_do(self,freeze_layers=[]):\n #self.load_reg_params()\n task_start_do(self.model, freeze_layers)\n def task_end_do(self):\n updater = Omega_update(self.model.parameters(), lr=0.0001, momentum=0.9)\n compute_importance(self.model, self.parallel_model, updater, self.dataloaders, loss_type=\"ewc\")\n accumulate_reg_params(self.model)\n self.save_reg_params()\n\n\nREG_TYPES = {\n \"mas\": MAS,\n \"ewc\": EWC,\n}\nargs.REG_TYPE_KEYS = REG_TYPE_KEYS = list(REG_TYPES.keys())\n\n\ndef task_start_do(model, freeze_layers=[]):\n if not hasattr(model,\"reg_params\"):\n initialize_reg_params(model,freeze_layers)\n else:\n clean_omega_sum(model,freeze_layers)\n\n\ndef initialize_reg_params(model,freeze_layers=[]):\n \"\"\"initialize an omega for each parameter to zero\"\"\"\n reg_params={}\n for name, param in model.named_parameters():\n if not name in freeze_layers:\n # print('initializing param',name)\n omega=torch.FloatTensor(param.size()).zero_()\n omega=omega.cuda()\n init_val=param.data.clone()\n init_val=init_val.cuda()\n reg_param={}\n reg_param['omega'] = omega\n reg_param['omega_sum'] = omega\n #initialize the initial value to that before starting training\n reg_param['init_val'] = init_val\n reg_params[param]=reg_param\n if 'data_count' not in reg_params:\n reg_params['data_count'] = 0\n reg_params['lambda'] = args.reg_lambda\n model.reg_params = reg_params\n\n\ndef clean_omega_sum(model,freeze_layers=[]):\n for name, param in model.named_parameters():\n if not name in freeze_layers:\n omega=torch.FloatTensor(param.size()).zero_()\n omega=omega.cuda()\n reg_param = model.reg_params.get(param)\n reg_param['omega_sum'] = omega\n model.reg_params[param]=reg_param\n model.reg_params['data_count'] = 0\n\n\nclass Weight_Regularized_AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n Parameters:\n lr (float): learning rate. 
Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\n correct_bias=correct_bias)\n super(Weight_Regularized_AdamW, self).__init__(params, defaults)\n\n def step(self, reg_params, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n reg_lambda=reg_params.get('lambda')\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n step_size = group['lr']\n if group['correct_bias']: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state['step']\n bias_correction2 = 1.0 - beta2 ** state['step']\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n #Regularize PART CODE GOES HERE\n if p in reg_params:\n\n reg_param=reg_params.get(p)\n #get omega for this parameter\n omega=reg_param.get('omega')\n #initial value when the training start\n init_val=reg_param.get('init_val')\n curr_weight_val=p.data\n\n #get the difference\n weight_dif=curr_weight_val.add(-1,init_val)\n #compute the MAS penalty\n regulizer=weight_dif.mul(2*reg_lambda*omega)\n del weight_dif\n del curr_weight_val\n del omega\n del init_val\n #add the MAS regulizer to the gradient\n # grad.add_(regulizer)\n p.data.add_(-group['lr'], regulizer)\n del regulizer\n #Regularize PART CODE ENDS\n if group['weight_decay'] > 0.0:\n p.data.add_(-group['lr'] * group['weight_decay'], p.data)\n\n return loss\n\n# update omega for one task; use in compute_importance\nclass Omega_update(SGD):\n \"\"\"\n Update the paramerter importance using the gradient of the function output norm. To be used at deployment time.\n reg_params:parameters omega to be updated\n batch_index,batch_size:used to keep a running average over the seen samples\n \"\"\"\n def __init__(self, params, lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False):\n\n super(Omega_update, self).__init__(params,lr,momentum,dampening,weight_decay,nesterov)\n\n def __setstate__(self, state):\n super(Omega_update, self).__setstate__(state)\n\n def step(self, reg_params, batch_size, closure=None):\n \"\"\"\n Performs a single parameters importance update setp\n \"\"\"\n #print('************************DOING A STEP************************')\n reg_params['data_count'] += batch_size\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n #if the parameter has an omega to be updated\n for p in group['params']:\n\n #print('************************ONE PARAM************************')\n\n if p.grad is None:\n continue\n\n if p in reg_params:\n\n #HERE MAS IMPOERANCE UPDATE GOES\n #get the gradient\n unreg_dp = p.grad.data.clone()\n reg_param = reg_params.get(p)\n #get parameter omega\n omega = reg_param.get('omega_sum')\n if args.seq_train_type == \"ewc\":\n omega = omega.add((unreg_dp)**2)\n else:\n omega = omega.add(unreg_dp.abs_())\n reg_param['omega_sum'] = omega\n reg_params[p] = reg_param\n #HERE MAS IMPOERANCE UPDATE ENDS\n\n return loss#HAS NOTHING TO DO\n\n# update omega for one task\ndef compute_importance(model, parallel_model, updater, dataloaders, loss_type=\"l2\"):\n \"\"\"Mimic the depoloyment setup where the model is applied on some samples and those are used to update the importance params\n Uses the L2norm of the function output. 
This is what we MAS uses as default\n \"\"\"\n # model.eval() # Set model to training mode so we get the gradient\n # train_loss_fct = DataParallelCriterion(CrossEntropyLoss(ignore_index=FILL_VAL), args.device_ids)\n\n softmax = torch.nn.Softmax(dim=-1)\n if loss_type == \"l2\":\n # loss_fct = DataParallelCriterion(torch.nn.MSELoss(reduction='mean'), args.device_ids)\n loss_fct = torch.nn.MSELoss(reduction='mean')\n elif loss_type == \"l1\":\n # loss_fct = DataParallelCriterion(torch.nn.L1Loss(reduction='mean'), args.device_ids)\n loss_fct = torch.nn.L1Loss(reduction='mean')\n elif loss_type == \"ewc\":\n CELoss = CrossEntropyLoss(ignore_index=FILL_VAL, reduction='mean', weight=TOKEN_WEIGHT)\n loss_fct = CELoss\n # loss_fct = DataParallelCriterion(CELoss, args.device_ids)\n\n # Iterate over data.\n for dataloader in dataloaders:\n for cq, len_cq, cqa, len_cqa, Y, _, _ in dataloader:\n # get the inputs\n n_inputs = sum(len(_cq) for _cq in cq)\n for i in range(len(cqa)):\n cq[i] = (cq[i].to(args.device_ids[i]),)\n len_cq[i] = len_cq[i].to(args.device_ids[i])\n cqa[i] = (cqa[i].to(args.device_ids[i]),)\n len_cqa[i] = len_cqa[i].to(args.device_ids[i])\n Y[i] = Y[i].to(args.device_ids[i])\n\n # zero the parameter gradients\n updater.zero_grad()\n\n # forward\n if loss_type != \"ewc\":\n # logits = parallel_model(cq)\n logits = model(cq)\n logits = [logit[range(len(logit)), len_cq[i]-1, :] for i, logit in enumerate(logits)]\n #logits = [softmax(logit, dim=-1) for logit in logits]\n target_zeros = [torch.zeros(logit.size()).to(args.device_ids[i]) for i, logit in enumerate(logits)]\n logits = [softmax(logit) for logit in logits]\n\n if loss_type == \"l2\":\n targets = loss_fct(logits, target_zeros)\n elif loss_type == \"l1\":\n targets = loss_fct(logits, target_zeros)\n else:\n # targets, _ = get_losses(parallel_model, cqa, Y, None, None, loss_fct)\n targets, _ = get_losses(model, cqa, Y, None, None, loss_fct)\n\n\n targets /= n_inputs \n\n #compute the gradients\n targets.backward()\n\n #update the parameters importance\n updater.step(model.reg_params, n_inputs)\n\n# omega of task1 + omega of task2 ...\n# new_omega=omega_sum/data_count; omega=new_omega+prev_omega\ndef accumulate_reg_params(model, freeze_layers=[]):\n \"\"\"accumelate the newly computed omega with the previously stroed one from the old previous tasks\"\"\"\n for name, param in model.named_parameters():\n if not name in freeze_layers:\n if param in model.reg_params:\n reg_param=model.reg_params.get(param)\n # print('restoring previous omega',name)\n prev_omega=reg_param.get('omega')\n new_omega=reg_param.get('omega_sum') / model.reg_params[\"data_count\"]\n acc_omega=torch.add(prev_omega,new_omega)\n\n del reg_param['omega_sum']\n reg_param['omega'] = acc_omega\n\n model.reg_params[param]=reg_param\n del prev_omega\n del new_omega\n del acc_omega\n else:\n if param in model.reg_params:\n reg_param=model.reg_params.get(param)\n # print('removing unused omega',name)\n del reg_param['omega']\n del model.reg_params[param]\n\n\nclass Weight_Regularized_SGD(SGD):\n r\"\"\"Implements SGD training with importance params regulization. 
IT inherents stochastic gradient descent (optionally with momentum).\n Nesterov momentum is based on the formula from\n\n \"\"\"\n\n def __init__(self, params, lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False):\n super(Weight_Regularized_SGD, self).__init__(params, lr,momentum,dampening,weight_decay,nesterov)\n\n\n def __setstate__(self, state):\n super(Weight_Regularized_SGD, self).__setstate__(state)\n\n\n def step(self, reg_params,closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n reg_lambda=reg_params.get('lambda')\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n\n #MAS PART CODE GOES HERE\n #if this param has an omega to use for regulization\n if p in reg_params:\n\n reg_param=reg_params.get(p)\n #get omega for this parameter\n omega=reg_param.get('omega')\n #initial value when the training start\n init_val=reg_param.get('init_val')\n\n curr_wegiht_val=p.data\n #move the tensors to cuda\n init_val=init_val.cuda()\n omega=omega.cuda()\n\n #get the difference\n weight_dif=curr_wegiht_val.add(-1,init_val)\n #compute the MAS penalty\n regulizer=weight_dif.mul(2*reg_lambda*omega)\n del weight_dif\n del curr_wegiht_val\n del omega\n del init_val\n #add the MAS regulizer to the gradient\n d_p.add_(regulizer)\n del regulizer\n #MAS PARAT CODE ENDS\n if weight_decay != 0:\n d_p.add_(weight_decay,p.data.sign())\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = d_p.clone()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n p.data.add_(-group['lr'], d_p)\n\n return loss\n" ]
[ [ "torch.nn.MSELoss", "torch.nn.Softmax", "torch.add", "torch.nn.L1Loss", "torch.zeros_like", "torch.nn.CrossEntropyLoss" ] ]
divyanshugit/Machine-Learning-Lab-EC792B
[ "2c0ceeef67dcbf9dd1135d0b4616d9f94205fd66" ]
[ "kNN/kNN.py" ]
[ "import numpy as np\nfrom math import sqrt\n\nclass KNN():\n \"\"\" K Nearest Neighbors classifier.\n Parameters:\n -----------\n k: int\n The number of closest neighbors that will determine the class of the \n sample that we wish to predict.\n \"\"\"\n def __init__(self, k=5):\n self.k = k\n\n def euclidean_distance(self, x1, x2):\n \"\"\"\n Calculate the euclidean distance between two rows.\n \"\"\" \n distance = 0.0\n \n for i in range(len(x1)-1):\n distance += (x1[i] - x2[i])**2\n \n return sqrt(distance)\n def _vote(self, neighbor_labels):\n \"\"\" Return the most common class among the neighbor samples \"\"\"\n counts = np.bincount(neighbor_labels.astype('int'))\n return counts.argmax()\n\n def predict(self, X_test, X_train, y_train):\n y_pred = np.empty(X_test.shape[0])\n # Determine the class of each sample\n for i, test_sample in enumerate(X_test):\n # Sort the training samples by their distance to the test sample and get the K nearest\n idx = np.argsort([self.euclidean_distance(test_sample, x) for x in X_train])[:self.k]\n # Extract the labels of the K nearest neighboring training samples\n k_nearest_neighbors = np.array([y_train[i] for i in idx])\n # Label sample as the most common class label\n y_pred[i] = self._vote(k_nearest_neighbors)\n\n return y_pred\n\nX = np.random.rand(100, 2)\ny = np.random.randint(0, 2, size=100)\nX_train = X[:80]\ny_train = y[:80]\nX_test = X[80:]\ny_test = y[80:]\nprint(\"Actual Value:\",y_test)\nKNN = KNN()\nprint(\"Pridicted Value:\",KNN.predict(X_test, X_train, y_train))\n\n# Returns\n#Actual Value: [0 1 1 1 0 0 0 0 1 1 0 1 1 1 0 1 0 1 0 0]\n#Pridicted Value: [0. 1. 1. 0. 0. 0. 0. 0. 1. 0. 0. 1. 1. 1. 1. 1. 0. 0. 1. 1.]" ]
[ [ "numpy.array", "numpy.random.randint", "numpy.random.rand", "numpy.empty" ] ]
chen0040/keras-language-translator-web-api
[ "06dc1d106e2293abaadd506992988a4a66b5eb78" ]
[ "translator_train/eng_to_fra_glove_translator_train.py" ]
[ "from keras.models import Model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers import Dense, Input, Embedding\nfrom keras.preprocessing.sequence import pad_sequences\nfrom collections import Counter\nimport nltk\nimport numpy as np\nimport os\nimport sys\nimport zipfile\nimport urllib.request\n\nBATCH_SIZE = 64\nNUM_EPOCHS = 100\nHIDDEN_UNITS = 256\nNUM_SAMPLES = 10000\nMAX_VOCAB_SIZE = 10000\nGLOVE_EMBEDDING_SIZE = 100\nDATA_PATH = 'data/fra.txt'\nWEIGHT_FILE_PATH = 'models/eng-to-fra/eng-to-fra-glove-weights.h5'\nARCHITECTURE_FILE_PATH = 'models/eng-to-fra/eng-to-fra-glove-architecture.json'\n\ntarget_counter = Counter()\n\nGLOVE_MODEL = \"very_large_data/glove.6B.\" + str(GLOVE_EMBEDDING_SIZE) + \"d.txt\"\nWHITELIST = 'abcdefghijklmnopqrstuvwxyz1234567890?.,'\n\n\ndef in_white_list(_word):\n for char in _word:\n if char in WHITELIST:\n return True\n\n return False\n\n\ndef reporthook(block_num, block_size, total_size):\n read_so_far = block_num * block_size\n if total_size > 0:\n percent = read_so_far * 1e2 / total_size\n s = \"\\r%5.1f%% %*d / %d\" % (\n percent, len(str(total_size)), read_so_far, total_size)\n sys.stderr.write(s)\n if read_so_far >= total_size: # near the end\n sys.stderr.write(\"\\n\")\n else: # total size is unknown\n sys.stderr.write(\"read %d\\n\" % (read_so_far,))\n\n\ndef download_glove():\n if not os.path.exists(GLOVE_MODEL):\n\n glove_zip = 'very_large_data/glove.6B.zip'\n\n if not os.path.exists('very_large_data'):\n os.makedirs('very_large_data')\n\n if not os.path.exists(glove_zip):\n print('glove file does not exist, downloading from internet')\n urllib.request.urlretrieve(url='http://nlp.stanford.edu/data/glove.6B.zip', filename=glove_zip,\n reporthook=reporthook)\n\n print('unzipping glove file')\n zip_ref = zipfile.ZipFile(glove_zip, 'r')\n zip_ref.extractall('very_large_data')\n zip_ref.close()\n\n\ndef load_glove():\n download_glove()\n _word2em = {}\n file = open(GLOVE_MODEL, mode='rt', encoding='utf8')\n for line in file:\n words = line.strip().split()\n word = words[0]\n embeds = np.array(words[1:], dtype=np.float32)\n _word2em[word] = embeds\n file.close()\n return _word2em\n\nword2em = load_glove()\n\nlines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\\n')\nfor line in lines[: min(NUM_SAMPLES, len(lines)-1)]:\n input_text, target_text = line.split('\\t')\n input_words = [w for w in nltk.word_tokenize(input_text.lower())]\n target_text = 'START ' + target_text.lower() + ' END'\n target_words = [w for w in nltk.word_tokenize(target_text)]\n for w in target_words:\n target_counter[w] += 1\n\ntarget_word2idx = dict()\nfor idx, word in enumerate(target_counter.most_common(MAX_VOCAB_SIZE)):\n target_word2idx[word[0]] = idx + 1\n\ntarget_word2idx['UNK'] = 0\n\ntarget_idx2word = dict([(idx, word) for word, idx in target_word2idx.items()])\n\nnum_decoder_tokens = len(target_idx2word)\n\nnp.save('models/eng-to-fra/eng-to-fra-glove-target-word2idx.npy', target_word2idx)\nnp.save('models/eng-to-fra/eng-to-fra-glove-target-idx2word.npy', target_idx2word)\n\nunknown_emb = np.random.randn(GLOVE_EMBEDDING_SIZE)\n\nnp.save('models/eng-to-fra/eng-to-fra-glove-unknown-emb', unknown_emb)\n\nencoder_input_data = []\n\nencoder_max_seq_length = 0\ndecoder_max_seq_length = 0\n\nlines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\\n')\nfor line in lines[: min(NUM_SAMPLES, len(lines)-1)]:\n input_text, target_text = line.split('\\t')\n target_text = 'START ' + target_text.lower() + ' END'\n 
input_words = [w for w in nltk.word_tokenize(input_text.lower())]\n target_words = [w for w in nltk.word_tokenize(target_text)]\n encoder_input_emb = []\n for w in input_words:\n emb = unknown_emb\n if w in word2em:\n emb = word2em[w]\n encoder_input_emb.append(emb)\n\n encoder_input_data.append(encoder_input_emb)\n encoder_max_seq_length = max(len(encoder_input_emb), encoder_max_seq_length)\n decoder_max_seq_length = max(len(target_words), decoder_max_seq_length)\n\nencoder_input_data = pad_sequences(encoder_input_data, encoder_max_seq_length)\n\ndecoder_target_data = np.zeros(shape=(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens))\ndecoder_input_data = np.zeros(shape=(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens))\nlines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\\n')\nfor lineIdx, line in enumerate(lines[: min(NUM_SAMPLES, len(lines)-1)]):\n _, target_text = line.split('\\t')\n target_text = 'START ' + target_text.lower() + ' END'\n target_words = [w for w in nltk.word_tokenize(target_text)]\n for idx, w in enumerate(target_words):\n w2idx = 0 # default [UNK]\n if w in target_word2idx:\n w2idx = target_word2idx[w]\n decoder_input_data[lineIdx, idx, w2idx] = 1\n if idx > 0:\n decoder_target_data[lineIdx, idx-1, w2idx] = 1\n\ncontext = dict()\ncontext['num_decoder_tokens'] = num_decoder_tokens\ncontext['encoder_max_seq_length'] = encoder_max_seq_length\ncontext['decoder_max_seq_length'] = decoder_max_seq_length\n\nnp.save('models/eng-to-fra/eng-to-fra-glove-context.npy', context)\n\nencoder_inputs = Input(shape=(None, GLOVE_EMBEDDING_SIZE), name='encoder_inputs')\nencoder_lstm = LSTM(units=HIDDEN_UNITS, return_state=True, name='encoder_lstm')\nencoder_outputs, encoder_state_h, encoder_state_c = encoder_lstm(encoder_inputs)\nencoder_states = [encoder_state_h, encoder_state_c]\n\ndecoder_inputs = Input(shape=(None, num_decoder_tokens), name='decoder_inputs')\ndecoder_lstm = LSTM(units=HIDDEN_UNITS, return_state=True, return_sequences=True, name='decoder_lstm')\ndecoder_outputs, decoder_state_h, decoder_state_c = decoder_lstm(decoder_inputs,\n initial_state=encoder_states)\ndecoder_dense = Dense(units=num_decoder_tokens, activation='softmax', name='decoder_dense')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\njson = model.to_json()\nopen(ARCHITECTURE_FILE_PATH, 'w').write(json)\n\ncheckpoint = ModelCheckpoint(filepath=WEIGHT_FILE_PATH, save_best_only=True)\nmodel.fit([encoder_input_data, decoder_input_data], decoder_target_data, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS,\n verbose=1, validation_split=0.2, callbacks=[checkpoint])\n\nmodel.save_weights(WEIGHT_FILE_PATH)\n\n\n\n\n\n\n\n" ]
[ [ "numpy.array", "numpy.random.randn", "numpy.zeros", "numpy.save" ] ]
jesbu1/spinningup
[ "fd54d9e06febc7ff5696a63d1e84e2c16d38e486" ]
[ "gym/quick_script.py" ]
[ "import gym\nimport numpy as np\nenv = gym.make('SawyerPush-v0')\nfor _ in range(100):\n env.reset()\n for i in range(150):\n env.render()\n env.step(np.random.uniform(0, 1, size=(4,)))\n" ]
[ [ "numpy.random.uniform" ] ]
NICALab/Inducing-Functions-through-RL
[ "e2171ff5e14bb272353e7df5156104ad2a85a3ae" ]
[ "scripts/plot.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport argparse\nfrom collections import defaultdict\nimport seaborn as sns\nimport pandas as pd\n\ntask_default_list = ['task_b_2021',\n 'task_b_vision_only_2021',\n 'task_b_sequence_ext_use_pred_20',\n 'task_b_sequence_ext_use_pred_60',\n 'task_b_sequence_ext_use_pred_80',\n 'task_b_random_ext_8',\n 'task_b_random_ext_10',\n 'task_b_random_ext_12',\n 'task_b_sequence_ext_use_pred_perm1',\n 'task_b_sequence_ext_use_pred_perm2',\n 'task_b_sequence_ext_use_pred_perm3',\n 'cifar10_2021',\n 'cifar10_sequence_ext_use_pred_2021',\n 'cifar10_vision_only_2021'\n ]\n\n\ndef moving_average(x, w):\n return np.convolve(x, np.ones(w), 'valid') / w\n\n\ndef plot_one(exp_names, csv_slices, feature, env_name):\n # plot features for every experiments\n fig = plt.figure(figsize=(8, 8))\n fig.canvas.set_window_title(feature)\n for csv_slice in csv_slices:\n plt.plot(moving_average(csv_slice[feature].to_numpy(), 100))\n plt.legend(exp_names)\n plt.title(env_name, fontsize=17)\n plt.xlabel(\"iteration\", fontsize=15)\n plt.xticks(fontsize=13)\n plt.ylabel(feature, fontsize=15)\n plt.yticks(fontsize=13)\n\n # make dataframe for multiple experiments\n task_list = []\n for task in task_default_list:\n if any(task in s for s in exp_names):\n task_list.append(task)\n num_df = len(task_list)\n df = []\n for i in range(num_df):\n feature_vals_list = []\n feature_vals_len_list = []\n print(i)\n for j, exp_name in enumerate(exp_names):\n if task_list[i] in exp_name:\n print(task_list[i], exp_name)\n csv_slice = csv_slices[j]\n feature_vals = moving_average(csv_slice[feature].to_numpy(), 100)\n max_len = min([2500, len(feature_vals)])\n feature_vals_list.append(feature_vals[:max_len])\n feature_vals_len_list.append(range(max_len))\n\n feature_vals_array = np.concatenate(feature_vals_list, axis=0)\n feature_vals_len_array = np.concatenate(feature_vals_len_list, axis=0)\n df_i = pd.DataFrame({'iteration': feature_vals_len_array,\n task_list[i]: feature_vals_array})\n df.append(df_i)\n\n fig = plt.figure(figsize=(8, 8))\n fig.canvas.set_window_title(feature)\n for i in range(num_df):\n sns.lineplot(data=df[i], x='iteration', y=task_list[i])\n plt.legend(task_list)\n plt.title(env_name, fontsize=17)\n plt.xlabel(\"iteration\", fontsize=15)\n plt.xticks(fontsize=13)\n plt.ylabel(feature, fontsize=15)\n plt.yticks(fontsize=13)\n\n\n\n\ndef plot_data(args):\n path = args.file\n features = args.f\n style = args.s\n\n plt.style.use(style)\n features = features[0].split(\",\")\n\n for feature in features:\n path = path.rstrip('/').rstrip('\\\\')\n env_name = path.split('/')[-1]\n method = env_name.split('-')[0]\n env_name = env_name.replace(method + '-', '')\n csv_paths = glob.glob(f\"{path}/**/progress.csv\")\n exp_names = [csv_path.split(\"/\")[-2] for csv_path in csv_paths]\n\n assert len(csv_paths) > 0, \"There is no csv files\"\n\n csv_slices = []\n for csv_path in csv_paths:\n csv = pd.read_csv(csv_path)\n csv_slices.append(csv.loc[:, [feature]])\n del csv\n\n plot_one(exp_names, csv_slices, feature, env_name)\n plt.show()\n\n\nif __name__ == \"__main__\":\n # To run, refer README.md\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str,\n help='path to the task directory')\n parser.add_argument('--f', type=str, nargs='+',\n help='List of features to plot')\n parser.add_argument('--s', type=str, default='ggplot',\n help='Style of plots, Look at 
(https://matplotlib.org/3.1.1/gallery/style_sheets/style_sheets_reference.html)')\n args = parser.parse_args()\n plot_data(args)" ]
[ [ "numpy.concatenate", "matplotlib.pyplot.xlabel", "pandas.DataFrame", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "numpy.ones", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.xticks" ] ]
gamaievsky/DescripteursHarmoniquesAudio
[ "551e253058502049a91803da8b0412b5ffb1bd60" ]
[ "Comparison.py" ]
[ "# Representations abstraites\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport params\n\n# # Ouverture des listes\n# with open ('liste1', 'rb') as fp:\n# l1x = pickle.load(fp)\n# with open ('liste2', 'rb') as fp:\n# l1y = pickle.load(fp)\n# with open ('liste1v', 'rb') as fp:\n# l2x = pickle.load(fp)\n# with open ('liste2v', 'rb') as fp:\n# l2y = pickle.load(fp)\n\n#Affichage\ndef Affichage(l1x,l1y,l2x,l2y):\n color = params.color_abstr\n\n plt.figure()\n ax = plt.subplot()\n\n plt.plot(l1x, l1y, 'b'+'--')\n plt.plot(l1x, l1y, 'b'+'o',label = 'Piano')\n for i in range(len(l1x)):\n ax.annotate(' {}'.format(i+1), (l1x[i], l1y[i]), color='black')\n plt.plot(l2x, l2y, 'r'+'--')\n plt.plot(l2x, l2y, 'r'+'o', label = 'Violon')\n for i in range(len(l2x)):\n ax.annotate(' {}'.format(i+1), (l2x[i], l2y[i]), color='black')\n\n\n d1, d2 = 'diffConcordance', 'crossConcordance'\n plt.xlabel(d1[0].upper() + d1[1:])\n plt.ylabel(d2[0].upper() + d2[1:])\n plt.title('Cadence ' + ' (' + d1[0].upper() + d1[1:] + ', ' + d2[0].upper() + d2[1:] + ')')\n plt.legend(frameon=True, framealpha=0.75)\n plt.show()\n\n\npts1 = [np.array((l1x[t],l1y[t])) for t in range(len(l1x))]\npts2 = [np.array((l2x[t],l2y[t])) for t in range(len(l1x))]\n\n# #distance euclidienne\n# def dist(x,y):\n# return np.sqrt(np.sum((x-y)**2))\n#\n#\n# def distance(pts1,pts2,type = 'diff'):\n# distance = 0\n# if type == 'stat':\n# for t in range(len(pts1)):\n# distance += dist(pts1[t], pts2[t])\n# return distance\n# else :\n# pts1_diff = [pts1[t+1]-pts1[t] for t in range(len(pts1)-1)]\n# pts2_diff = [pts2[t+1]-pts2[t] for t in range(len(pts2)-1)]\n# for t in range(len(pts1_diff)):\n# distance += dist(pts1_diff[t], pts2_diff[t])\n# return distance\n\n\n# print(distance(pts1,pts2,'stat'))\n\npoints = np.asarray([pts1, pts2])\n\n# Fonction qui calcule l'éloignement de courbes nomalisées, correspondant à différents timbres \ndef dispersion(points,type = 'diff'):\n if type == 'stat':\n return np.linalg.norm(np.std(points,axis = 0), axis = 1)\n else :\n points_diff = np.zeros((points.shape[0],points.shape[1]-1,points.shape[2]))\n for i in range(points.shape[1]-1):\n points_diff[:,i] = points[:,i+1]-points[:,i]\n return np.linalg.norm(np.std(points_diff,axis = 0), axis = 1)\n\n\n\nprint(dispersion(points))\n\n\n\n\n\n# Affichage(l1x,l1y,l2x,l2y)\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.zeros", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.std", "matplotlib.pyplot.show", "matplotlib.pyplot.subplot" ] ]
DzAvril/tvm
[ "89fa6d3363926a6770084c10f9dee2cf78129903" ]
[ "apps/deploy_tflite_cpp/build_input.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Creates a simple TVM modules.\"\"\"\n\nimport argparse\nimport os\nimport logging\nfrom PIL import Image\nimport numpy as np\n\n\ndef preprocess_image(image_file):\n resized_image = Image.open(image_file).resize((224, 224))\n image_data = np.asarray(resized_image).astype(\"float32\")\n # after expand_dims, we have format NCHW\n image_data = np.expand_dims(image_data, axis=0)\n image_data[:, :, :, 0] = 2.0 / 255.0 * image_data[:, :, :, 0] - 1\n image_data[:, :, :, 1] = 2.0 / 255.0 * image_data[:, :, :, 1] - 1\n image_data[:, :, :, 2] = 2.0 / 255.0 * image_data[:, :, :, 2] - 1\n return image_data\n\n\ndef build_inputs():\n x = preprocess_image(\"lib/cat.png\")\n print(\"x\", x.shape)\n with open(\"lib/input.bin\", \"wb\") as fp:\n fp.write(x.astype(np.float32).tobytes())\n\n\nif __name__ == \"__main__\":\n build_inputs()\n" ]
[ [ "numpy.asarray", "numpy.expand_dims" ] ]
LucasFidon/trustworthy-ai-fetal-brain-segmentation
[ "84959da54d8c2fb156da2b06cca30fa31a1c926d", "84959da54d8c2fb156da2b06cca30fa31a1c926d" ]
[ "docker/third-party/nnUNet/nnunet/dataset_conversion/Task172_CovidSegChallengeAutoCorrect.py", "docker/third-party/nnUNet/nnunet/network_architecture/generic_UNet.py" ]
[ "import os\nimport pickle\nfrom scipy.ndimage.measurements import label\nimport numpy as np\nimport SimpleITK as sitk\nfrom collections import OrderedDict\nfrom lungmask import mask\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom nnunet.paths import nnUNet_raw_data\n\nMAIN_DATA_FOLDER = '/data'\nMAIN_WORKSPACE_FOLDER = '/workspace'\nNNUNET_FOLDER = os.path.join(MAIN_WORKSPACE_FOLDER, 'nnUNet', 'nnunet')\nNNUNET_INFERENCE_FOLDER = os.path.join(NNUNET_FOLDER, 'inference')\n\n# Challenge data\nDATA_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'COVID-19-20', 'COVID-19-20_v2')\nTRAIN_DATA_FOLDER = join(DATA_FOLDER, 'Train')\nVALID_DATA_FOLDER = join(DATA_FOLDER, 'Validation')\n\nJUN_DATASET_CT_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'covid_benchmark', 'COVID-19-CT-Seg_20cases')\nJUN_DATASET_LESIONS_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'covid_benchmark', 'Infection_Mask')\n\n# Guotai data:\n# only binary seg\n# non HU intensity\nGUOTAI_DATASET_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'UESTC-COVID-19')\nGUOTAI_DATASET_PART1 = os.path.join( # 70 cases labelled by non-experts\n GUOTAI_DATASET_FOLDER,\n 'UESTC-COVID-19-20201109T135232Z-001',\n 'UESTC-COVID-19',\n 'part1',\n)\nGUOTAI_DATASET_PART2 = os.path.join( # 50 cases labelled by experts\n GUOTAI_DATASET_FOLDER,\n 'UESTC-COVID-19-20201109T135232Z-001',\n 'UESTC-COVID-19',\n 'part2',\n)\nGUOTAI_HU_MIN = -1400 # strange value... could it be -1000? nop it is correct\nGUOTAI_HU_MAX = 100\n\n\n# iCovid data\nICOVID_DATASET_FOLDER = os.path.join(MAIN_DATA_FOLDER, 'icovid_raw_data')\nLABELS_ICOVID = {\n # Basic lesion classes\n 'ggo': 1,\n 'consolidation': 2,\n 'crazy_paving_pattern': 3,\n 'linear_opacity': 2,\n # Super classes\n 'combined_pattern': 4,\n 'reversed_halo_sign': 4,\n 'other_abnormal_tissue': 5,\n 'lung': 6,\n 'background': 0,\n}\nPATIENT_ID_TO_EXCLUDE = [\n '1363112652', # moderate artefact and I can't see some of the lesions segmented\n '1366125607', # artefact and suspicious seg (completed and reviewed by same person)\n '1413717420', # strong breathing artefact and suspicious seg\n '1812933091', # BART: to exclude pat12. nothing seg\n '1868609820', # BART: to exclude pat13. 
nothing seg\n '2602703662', # can't see most of the lesions; noisy seg\n '2762004157', # mainly other abn and comb pattern; noisy seg\n '2969709397', # lots of other abn; mix other abn other lesions; can't see some of the lesions\n '3375944345', # no lesion\n '5925215067', # not annotated completely (very partial)\n '7414742831', # can't see the lesions; seg seem noisy\n # '7957238453', # suspicious: lesion in only one slice\n '8874887577', # mainly combined pattern; some suspicious seg\n]\n\n\n# PREPROCESSING PARAMS\nMIN_HU = -1000 # air\nMAX_HU = 100 # max for Guotai's data\nMASK_MARGIN = [5, 15, 15]\nMIN_NUM_VOXEL_PER_COMP = 100000\nLABELS = {\n 'lung': 1,\n 'lesion': 2,\n 'unsure': 3, # when we are not sure if a voxel belongs to the lesion or to healthy lung tissues\n 'background': 0,\n}\n\n\ndef get_patient_name_from_file_name(file_name):\n name = file_name.replace('_ct.nii.gz', '').replace('_seg.nii.gz', '').replace('.nii.gz', '')\n return name\n\n\ndef predict_lesion_with_model_ensemble_task171(preprocessed_ct_sitk):\n \"\"\"\n Compute the automatic segmentations using the ensemble 3d_lowres\n trained on task171 with 16GB and batch size=2\n :param preprocessed_ct:\n :return: seg proba\n \"\"\"\n def post_process_softmax_pred(softmax_np):\n seg_pred = np.argmax(softmax_np, axis=0)\n seg_lung = seg_pred > 0\n # Keep only the two largest connected components\n structure = np.ones((3, 3, 3), dtype=np.int)\n labeled, ncomp = label(seg_lung, structure)\n size_comp = [\n np.sum(labeled == l) for l in range(1, ncomp + 1)\n ]\n first_largest_comp = np.argmax(size_comp)\n label_first = first_largest_comp + 1\n size_comp[first_largest_comp] = -1\n second_largest_comp = np.argmax(size_comp)\n label_second = second_largest_comp + 1\n # To avoid cases where the two lungs are in the same component\n # and the second largest component is outside the lungs\n # we set a minimum size for the second largest component\n if size_comp[second_largest_comp] < MIN_NUM_VOXEL_PER_COMP:\n label_second = -1\n for i in range(1, ncomp + 1):\n if i not in [label_first, label_second]:\n # set to background with proba 1\n # the voxels of the foreground that are not in the\n # two main connected components of the foreground\n softmax_np[:, labeled == i] = 0.\n softmax_np[0, labeled == i] = 1.\n return softmax_np\n\n tmp_folder = 'tmp_autoseg'\n if not os.path.exists(tmp_folder):\n os.mkdir(tmp_folder)\n\n # Save the CT in a tmp folder\n tmp_folder_ct = os.path.join(tmp_folder, 'ct')\n if not os.path.exists(tmp_folder_ct):\n os.mkdir(tmp_folder_ct)\n save_img_path = os.path.join(tmp_folder_ct, 'ct_0000.nii.gz')\n sitk.WriteImage(preprocessed_ct_sitk, save_img_path)\n\n out_folder_list = []\n\n # Run the individual model predictions\n for fold in range(5):\n output_folder = os.path.join(tmp_folder, 'out_fold%d' % fold)\n out_folder_list.append(output_folder)\n if os.path.exists(output_folder):\n os.system('rm -r %s' % output_folder)\n options = '-t 171 -f %d -m 3d_lowres -tr nnUNetTrainerV2 -p nnUNetPlansv2.1_16GB --save_npz' % fold\n cmd = '%s/predict_simple.py -i %s -o %s %s' % (NNUNET_INFERENCE_FOLDER, tmp_folder_ct, output_folder, options)\n print('\\n%s\\n' % cmd)\n os.system(cmd)\n\n # Compute the mean of the softmax predictions\n output_folder_ens = os.path.join(tmp_folder, 'out_ensemble')\n cmd = '%s/ensemble_predictions.py -f %s %s %s %s %s -o %s --npz' % \\\n (NNUNET_INFERENCE_FOLDER, out_folder_list[0], out_folder_list[1], out_folder_list[2], out_folder_list[3], out_folder_list[4], output_folder_ens)\n 
print('\\n%s\\n' % cmd)\n os.system(cmd)\n\n # Load the softmax proba ensemble prediction\n softmax_path = os.path.join(output_folder_ens, 'ct.npz')\n softmax_cropped = np.load(softmax_path)['softmax'][None][0,...]\n pkl_path = os.path.join(output_folder_ens, 'ct.pkl')\n with open(pkl_path, 'rb') as f:\n prop = pickle.load(f)\n ori_img_shape = prop['original_size_of_raw_data']\n shape = (softmax_cropped.shape[0], ori_img_shape[0], ori_img_shape[1], ori_img_shape[2])\n softmax_full = np.zeros(shape)\n softmax_full[0, ...] = 1 # initialize to background\n crop_coord = np.array(prop['crop_bbox'])\n softmax_full[:, crop_coord[0,0]:crop_coord[0,1], crop_coord[1,0]:crop_coord[1,1], crop_coord[2,0]:crop_coord[2,1]] = softmax_cropped\n\n # Apply the post-processing\n softmax_full = post_process_softmax_pred(softmax_full)\n\n # Delete all the temporary files\n if os.path.exists(tmp_folder):\n os.system('rm -r %s' % tmp_folder)\n\n return softmax_full\n\ndef preprocess(img_path, seg_path=None, mode='challenge', crop=False):\n def mask_img(img_np, lung_mask_np, do_crop=False):\n x, y, z = np.where(lung_mask_np > 0)\n x_min = max(0, np.min(x) - MASK_MARGIN[0])\n x_max = min(img_np.shape[0], np.max(x) + MASK_MARGIN[0])\n y_min = max(0, np.min(y) - MASK_MARGIN[1])\n y_max = min(img_np.shape[1], np.max(y) + MASK_MARGIN[1])\n z_min = max(0, np.min(z) - MASK_MARGIN[2])\n z_max = min(img_np.shape[2], np.max(z) + MASK_MARGIN[2])\n if do_crop:\n img_np = img_np[x_min:x_max, y_min:y_max, z_min:z_max]\n else:\n img_np[:x_min, :, :] = 0\n img_np[x_max:, :, :] = 0\n img_np[:, :y_min, :] = 0\n img_np[:, y_max:, :] = 0\n img_np[:, :, :z_min] = 0\n img_np[:, :, z_max:] = 0\n return img_np\n\n def postprocess_auto_lung_seg(lung_seg_np):\n # Binarize the lung segmentation\n lung_seg_np[lung_seg_np > 1] = 1\n # Keep only the two largest connected components\n structure = np.ones((3, 3, 3), dtype=np.int)\n labeled, ncomp = label(lung_seg_np, structure)\n size_comp = [\n np.sum(labeled == l) for l in range(1, ncomp + 1)\n ]\n first_largest_comp = np.argmax(size_comp)\n label_first = first_largest_comp + 1\n size_comp[first_largest_comp] = -1\n second_largest_comp = np.argmax(size_comp)\n label_second = second_largest_comp + 1\n # To avoid cases where the two lungs are in the same component\n # and the second largest component is outside the lungs\n # we set a minimum size for the second largest component\n if size_comp[second_largest_comp] < MIN_NUM_VOXEL_PER_COMP:\n label_second = -1\n for i in range(1, ncomp + 1):\n if i in [label_first, label_second]:\n labeled[labeled == i] = 1\n else:\n labeled[labeled == i] = 0\n return labeled\n\n def update_labels_seg_task171(ct_sitk, seg_np, mode='normal'):\n new_seg = np.zeros_like(seg_np)\n pred_proba_seg_t171 = predict_lesion_with_model_ensemble_task171(ct_sitk)\n pred_seg_t171 = np.argmax(pred_proba_seg_t171, axis=0)\n\n # Make the initial segmentation\n new_seg[pred_seg_t171 > 0] = LABELS['lung']\n if mode == 'icovid':\n for l in [1, 2, 3, 4]:\n new_seg[seg_np == l] = LABELS['lesion'] # all lesion types together\n new_seg[seg_np == LABELS_ICOVID['other_abnormal_tissue']] = LABELS['unsure']\n else:\n new_seg[seg_np > 0] = LABELS['lesion']\n\n # Look at the voxels with disagreement between manual and auto seg\n # and mark them as 'unsure' when appropriate\n max_proba = np.max(pred_proba_seg_t171, axis=0)\n # We mark a voxel as 'unsure' iff there is a disagreement\n # and the ensemble has a maximum probability of at least 0.75\n disagreement = np.logical_and(new_seg != 
pred_seg_t171, max_proba >= 0.75)\n new_seg[disagreement] = LABELS['unsure']\n\n return new_seg\n\n def convert_to_sitk(img_np, ref_img_sitk):\n img_sitk = sitk.GetImageFromArray(img_np)\n img_sitk.SetOrigin(ref_img_sitk.GetOrigin())\n img_sitk.SetSpacing(ref_img_sitk.GetSpacing())\n img_sitk.SetDirection(ref_img_sitk.GetDirection())\n return img_sitk\n\n img = sitk.ReadImage(img_path)\n img_np = sitk.GetArrayFromImage(img)\n if mode == 'guotai':\n # Convert the CT intensities back to HU\n # This has to be done before inference of the lung mask\n img_np = GUOTAI_HU_MIN + (GUOTAI_HU_MAX - GUOTAI_HU_MIN) * img_np\n img = convert_to_sitk(img_np, img)\n\n # Create the lung mask\n if mode == 'icovid':\n assert seg_path is not None, 'Segmentation is required for iCovid data'\n seg = sitk.ReadImage(seg_path)\n seg_np = sitk.GetArrayFromImage(seg)\n lung_mask_np = np.zeros_like(seg_np)\n lung_mask_np[seg_np > 0] = 1\n else:\n lung_mask_np = mask.apply(img)\n # binarize the mask and keep only the two largest connected components\n lung_mask_np = postprocess_auto_lung_seg(lung_mask_np)\n\n # Clip the HU intensity\n img_np[img_np < MIN_HU] = MIN_HU\n img_np[img_np > MAX_HU] = MAX_HU\n\n # Mask the image outside a box containing the lung\n img_np = mask_img(img_np, lung_mask_np, do_crop=crop)\n\n # Convert back to SITK image\n img_pre = convert_to_sitk(img_np, img)\n\n # Seg pre-processing (if available)\n if seg_path is not None:\n seg = sitk.ReadImage(seg_path)\n seg_np = sitk.GetArrayFromImage(seg)\n if crop:\n seg_np = mask_img(seg_np, lung_mask_np, do_crop=crop)\n # Add lung and unsure as extra labels for the segmentation\n seg_np = update_labels_seg_task171(img_pre, seg_np, mode=mode)\n if mode == 'guotai':\n seg_pre = convert_to_sitk(seg_np, img) # need to use img header for Guotai's data\n else:\n seg_pre = convert_to_sitk(seg_np, seg)\n else:\n seg_pre = None\n\n return img_pre, seg_pre\n\n\nif __name__ == '__main__':\n task_id = 172\n task_name = \"CovidSegChallengeAutoCorrect\"\n\n foldername = \"Task%d_%s\" % (task_id, task_name)\n\n out_base = join(nnUNet_raw_data, foldername)\n imagestr = join(out_base, \"imagesTr\")\n imagesval = join(out_base, \"imagesVal\")\n labelstr = join(out_base, \"labelsTr\")\n maybe_mkdir_p(imagestr)\n maybe_mkdir_p(imagesval)\n maybe_mkdir_p(labelstr)\n\n train_patient_names = []\n valid_patient_names = []\n\n # Training data (Challenge data)\n for f_n in os.listdir(TRAIN_DATA_FOLDER):\n patient_name = get_patient_name_from_file_name(f_n)\n if patient_name in train_patient_names:\n continue\n print('\\nPreprocces', patient_name)\n train_patient_names.append(patient_name)\n img = join(TRAIN_DATA_FOLDER, '%s_ct.nii.gz' % patient_name)\n seg = join(TRAIN_DATA_FOLDER, '%s_seg.nii.gz' % patient_name)\n assert all([\n isfile(img),\n isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n print('Found %d training cases in %s' % (len(train_patient_names), TRAIN_DATA_FOLDER))\n\n # Jun dataset\n jun_patient_names = []\n for f_n in os.listdir(JUN_DATASET_CT_FOLDER):\n if not 'coronacases' in f_n: # remove data with low quality\n continue\n patient_name = 
get_patient_name_from_file_name(f_n)\n print('Preprocces', patient_name)\n if patient_name in train_patient_names:\n continue\n jun_patient_names.append(patient_name)\n img = join(JUN_DATASET_CT_FOLDER, '%s.nii.gz' % patient_name)\n seg = join(JUN_DATASET_LESIONS_FOLDER, '%s.nii.gz' % patient_name)\n assert all([\n isfile(img),\n isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, mode='jun', crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n train_patient_names += jun_patient_names\n print('Found %d training cases in %s' % (len(jun_patient_names), JUN_DATASET_CT_FOLDER))\n\n # Guotai data (expert)\n guotai_pat_names = []\n img_folder = os.path.join(GUOTAI_DATASET_PART2, 'image')\n seg_folder = os.path.join(GUOTAI_DATASET_PART2, 'label')\n for f_n in os.listdir(img_folder):\n patient_name = get_patient_name_from_file_name(f_n) + '_part2'\n if patient_name in train_patient_names:\n continue\n print('Preprocces', patient_name)\n guotai_pat_names.append(patient_name)\n img = join(img_folder, f_n)\n seg = join(seg_folder, f_n)\n assert all([\n isfile(img),\n isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, mode='guotai', crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n train_patient_names += guotai_pat_names\n print('Found %d training cases in %s' % (len(guotai_pat_names), GUOTAI_DATASET_PART2))\n\n # Guotai data (non-expert)\n guotai_pat_names = []\n img_folder = os.path.join(GUOTAI_DATASET_PART1, 'image')\n seg_folder = os.path.join(GUOTAI_DATASET_PART1, 'label')\n for f_n in os.listdir(img_folder):\n patient_name = get_patient_name_from_file_name(f_n) + '_part1'\n if patient_name in train_patient_names:\n continue\n print('Preprocces', patient_name)\n guotai_pat_names.append(patient_name)\n img = join(img_folder, f_n)\n seg = join(seg_folder, f_n)\n assert all([\n isfile(img),\n isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, mode='guotai', crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n train_patient_names += guotai_pat_names\n print('Found %d training cases in %s' % (len(guotai_pat_names), GUOTAI_DATASET_PART1))\n\n # iCovid data\n icovid_patient_names = []\n for f_n in os.listdir(ICOVID_DATASET_FOLDER):\n patient_name = f_n\n if patient_name in PATIENT_ID_TO_EXCLUDE:\n print(patient_name, 'excluded')\n continue\n print('Preprocces', patient_name)\n icovid_patient_names.append(patient_name)\n img = join(ICOVID_DATASET_FOLDER, patient_name, 'ct.nii.gz')\n seg = join(ICOVID_DATASET_FOLDER, patient_name, 'lesions_seg.nii.gz')\n assert all([\n isfile(img),\n 
isfile(seg),\n ]), '%s: some files were not found' % patient_name\n\n save_img = join(imagestr, patient_name + \"_0000.nii.gz\")\n save_seg = join(labelstr, patient_name + \".nii.gz\")\n if os.path.exists(save_img) and os.path.exists(save_seg):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, seg_pre = preprocess(img, seg, mode='icovid', crop=True)\n sitk.WriteImage(img_pre, save_img)\n sitk.WriteImage(seg_pre, save_seg)\n train_patient_names += icovid_patient_names\n print('Found %d training cases in %s' % (len(icovid_patient_names), ICOVID_DATASET_FOLDER))\n\n print('')\n print('A total of %s training cases were found' % len(train_patient_names))\n print('')\n\n # Validation data\n for f_n in os.listdir(VALID_DATA_FOLDER):\n patient_name = get_patient_name_from_file_name(f_n)\n if patient_name in valid_patient_names:\n continue\n valid_patient_names.append(patient_name)\n img = join(VALID_DATA_FOLDER, '%s_ct.nii.gz' % patient_name)\n assert isfile(img), '%s: CT file was not found' % patient_name\n\n save_img = join(imagesval, patient_name + \"_0000.nii.gz\")\n if os.path.exists(save_img):\n print('%s already reprocessed' % patient_name)\n print('pass\\n')\n else:\n img_pre, _ = preprocess(img)\n sitk.WriteImage(img_pre, save_img)\n print('Found %d validation cases' % len(valid_patient_names))\n\n # Dataset json file\n json_dict = OrderedDict()\n json_dict['name'] = task_name\n json_dict['description'] = \"nothing\"\n json_dict['tensorImageSize'] = \"4D\"\n json_dict['reference'] = \"no reference\"\n json_dict['licence'] = \"no license\"\n json_dict['release'] = \"0.0\"\n json_dict['modality'] = {\n \"0\": \"CT\",\n }\n json_dict['labels'] = {\n \"0\": \"background\",\n \"1\": \"lung\",\n \"2\": \"lesion\",\n \"3\": \"unsure\",\n }\n json_dict['numTraining'] = len(train_patient_names)\n json_dict['numTest'] = 0\n json_dict['training'] = [{\n 'image': \"./imagesTr/%s.nii.gz\" % i,\n \"label\": \"./labelsTr/%s.nii.gz\" % i}\n for i in train_patient_names]\n json_dict['test'] = []\n save_json(json_dict, join(out_base, \"dataset.json\"))", "# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom copy import deepcopy\nfrom nnunet.utilities.nd_softmax import softmax_helper\nfrom torch import nn\nimport torch\nimport numpy as np\nfrom nnunet.network_architecture.initialization import InitWeights_He\nfrom nnunet.network_architecture.neural_network import SegmentationNetwork\nimport torch.nn.functional\n\n\nclass ConvDropoutNormNonlin(nn.Module):\n \"\"\"\n fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. 
Bad.\n \"\"\"\n\n def __init__(self, input_channels, output_channels,\n conv_op=nn.Conv2d, conv_kwargs=None,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None):\n super(ConvDropoutNormNonlin, self).__init__()\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n if conv_kwargs is None:\n conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin_kwargs = nonlin_kwargs\n self.nonlin = nonlin\n self.dropout_op = dropout_op\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.conv_kwargs = conv_kwargs\n self.conv_op = conv_op\n self.norm_op = norm_op\n\n self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)\n if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[\n 'p'] > 0:\n self.dropout = self.dropout_op(**self.dropout_op_kwargs)\n else:\n self.dropout = None\n self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)\n self.lrelu = self.nonlin(**self.nonlin_kwargs)\n\n def forward(self, x):\n x = self.conv(x)\n if self.dropout is not None:\n x = self.dropout(x)\n return self.lrelu(self.instnorm(x))\n\n\nclass ConvDropoutNonlinNorm(ConvDropoutNormNonlin):\n def forward(self, x):\n x = self.conv(x)\n if self.dropout is not None:\n x = self.dropout(x)\n return self.instnorm(self.lrelu(x))\n\n\nclass StackedConvLayers(nn.Module):\n def __init__(self, input_feature_channels, output_feature_channels, num_convs,\n conv_op=nn.Conv2d, conv_kwargs=None,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):\n '''\n stacks ConvDropoutNormLReLU layers. initial_stride will only be applied to first layer in the stack. 
The other parameters affect all layers\n :param input_feature_channels:\n :param output_feature_channels:\n :param num_convs:\n :param dilation:\n :param kernel_size:\n :param padding:\n :param dropout:\n :param initial_stride:\n :param conv_op:\n :param norm_op:\n :param dropout_op:\n :param inplace:\n :param neg_slope:\n :param norm_affine:\n :param conv_bias:\n '''\n self.input_channels = input_feature_channels\n self.output_channels = output_feature_channels\n\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n if conv_kwargs is None:\n conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin_kwargs = nonlin_kwargs\n self.nonlin = nonlin\n self.dropout_op = dropout_op\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.conv_kwargs = conv_kwargs\n self.conv_op = conv_op\n self.norm_op = norm_op\n\n if first_stride is not None:\n self.conv_kwargs_first_conv = deepcopy(conv_kwargs)\n self.conv_kwargs_first_conv['stride'] = first_stride\n else:\n self.conv_kwargs_first_conv = conv_kwargs\n\n super(StackedConvLayers, self).__init__()\n self.blocks = nn.Sequential(\n *([basic_block(input_feature_channels, output_feature_channels, self.conv_op,\n self.conv_kwargs_first_conv,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs)] +\n [basic_block(output_feature_channels, output_feature_channels, self.conv_op,\n self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))\n\n def forward(self, x):\n return self.blocks(x)\n\n\ndef print_module_training_status(module):\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, nn.Dropout3d) or \\\n isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \\\n or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \\\n or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module,\n nn.BatchNorm1d):\n print(str(module), module.training)\n\n\nclass Upsample(nn.Module):\n def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):\n super(Upsample, self).__init__()\n self.align_corners = align_corners\n self.mode = mode\n self.scale_factor = scale_factor\n self.size = size\n\n def forward(self, x):\n return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode,\n align_corners=self.align_corners)\n\n\nclass Generic_UNet(SegmentationNetwork):\n DEFAULT_BATCH_SIZE_3D = 2\n DEFAULT_PATCH_SIZE_3D = (64, 192, 160)\n SPACING_FACTOR_BETWEEN_STAGES = 2\n BASE_NUM_FEATURES_3D = 30\n MAX_NUMPOOL_3D = 999\n MAX_NUM_FILTERS_3D = 320\n\n DEFAULT_PATCH_SIZE_2D = (256, 256)\n BASE_NUM_FEATURES_2D = 30\n DEFAULT_BATCH_SIZE_2D = 50\n MAX_NUMPOOL_2D = 999\n MAX_FILTERS_2D = 480\n\n use_this_for_batch_size_computation_2D = 19739648\n use_this_for_batch_size_computation_3D = 520000000 # 505789440\n\n def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,\n feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n 
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,\n final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,\n conv_kernel_sizes=None,\n upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,\n max_num_features=None, basic_block=ConvDropoutNormNonlin,\n seg_output_use_bias=False):\n \"\"\"\n basically more flexible than v1, architecture is the same\n\n Does this look complicated? Nah bro. Functionality > usability\n\n This does everything you need, including world peace.\n\n Questions? -> [email protected]\n \"\"\"\n super(Generic_UNet, self).__init__()\n self.convolutional_upsampling = convolutional_upsampling\n self.convolutional_pooling = convolutional_pooling\n self.upscale_logits = upscale_logits\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n\n self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin = nonlin\n self.nonlin_kwargs = nonlin_kwargs\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.weightInitializer = weightInitializer\n self.conv_op = conv_op\n self.norm_op = norm_op\n self.dropout_op = dropout_op\n self.num_classes = num_classes\n self.final_nonlin = final_nonlin\n self._deep_supervision = deep_supervision\n self.do_ds = deep_supervision\n\n if conv_op == nn.Conv2d:\n upsample_mode = 'bilinear'\n pool_op = nn.MaxPool2d\n transpconv = nn.ConvTranspose2d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3)] * (num_pool + 1)\n elif conv_op == nn.Conv3d:\n upsample_mode = 'trilinear'\n pool_op = nn.MaxPool3d\n transpconv = nn.ConvTranspose3d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)\n else:\n raise ValueError(\"unknown convolution dimensionality, conv op: %s\" % str(conv_op))\n\n self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)\n self.pool_op_kernel_sizes = pool_op_kernel_sizes\n self.conv_kernel_sizes = conv_kernel_sizes\n\n self.conv_pad_sizes = []\n for krnl in self.conv_kernel_sizes:\n self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])\n\n if max_num_features is None:\n if self.conv_op == nn.Conv3d:\n self.max_num_features = self.MAX_NUM_FILTERS_3D\n else:\n self.max_num_features = self.MAX_FILTERS_2D\n else:\n self.max_num_features = max_num_features\n\n self.conv_blocks_context = []\n self.conv_blocks_localization = []\n self.td = []\n self.tu = []\n self.seg_outputs = []\n\n output_features = base_num_features\n input_features = input_channels\n\n for d in range(num_pool):\n # determine the first stride\n if d != 0 and self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[d - 1]\n else:\n first_stride = None\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[d]\n # add convolutions\n self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,\n self.conv_op, self.conv_kwargs, self.norm_op,\n self.norm_op_kwargs, self.dropout_op,\n 
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,\n first_stride, basic_block=basic_block))\n if not self.convolutional_pooling:\n self.td.append(pool_op(pool_op_kernel_sizes[d]))\n input_features = output_features\n output_features = int(np.round(output_features * feat_map_mul_on_downscale))\n\n output_features = min(output_features, self.max_num_features)\n\n # now the bottleneck.\n # determine the first stride\n if self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[-1]\n else:\n first_stride = None\n\n # the output of the last conv must match the number of features from the skip connection if we are not using\n # convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be\n # done by the transposed conv\n if self.convolutional_upsampling:\n final_num_features = output_features\n else:\n final_num_features = self.conv_blocks_context[-1].output_channels\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]\n self.conv_blocks_context.append(nn.Sequential(\n StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, first_stride, basic_block=basic_block),\n StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, basic_block=basic_block)))\n\n # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here\n if not dropout_in_localization:\n old_dropout_p = self.dropout_op_kwargs['p']\n self.dropout_op_kwargs['p'] = 0.0\n\n # now lets build the localization pathway\n for u in range(num_pool):\n nfeatures_from_down = final_num_features\n nfeatures_from_skip = self.conv_blocks_context[\n -(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2\n n_features_after_tu_and_concat = nfeatures_from_skip * 2\n\n # the first conv reduces the number of features to match those of skip\n # the following convs work on that number of features\n # if not convolutional upsampling then the final conv reduces the num of features again\n if u != num_pool - 1 and not self.convolutional_upsampling:\n final_num_features = self.conv_blocks_context[-(3 + u)].output_channels\n else:\n final_num_features = nfeatures_from_skip\n\n if not self.convolutional_upsampling:\n self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))\n else:\n self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],\n pool_op_kernel_sizes[-(u + 1)], bias=False))\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]\n self.conv_blocks_localization.append(nn.Sequential(\n StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,\n self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,\n self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),\n StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs, basic_block=basic_block)\n ))\n\n for ds 
in range(len(self.conv_blocks_localization)):\n # Last convolution operation(s) to obtain the labels score maps before softmax/argmax\n self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,\n 1, 1, 0, 1, 1, seg_output_use_bias))\n\n self.upscale_logits_ops = []\n cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]\n for usl in range(num_pool - 1):\n if self.upscale_logits:\n self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),\n mode=upsample_mode))\n else:\n self.upscale_logits_ops.append(lambda x: x)\n\n if not dropout_in_localization:\n self.dropout_op_kwargs['p'] = old_dropout_p\n\n # register all modules properly\n self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)\n self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)\n self.td = nn.ModuleList(self.td)\n self.tu = nn.ModuleList(self.tu)\n self.seg_outputs = nn.ModuleList(self.seg_outputs)\n if self.upscale_logits:\n self.upscale_logits_ops = nn.ModuleList(\n self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here\n\n if self.weightInitializer is not None:\n self.apply(self.weightInitializer)\n # self.apply(print_module_training_status)\n\n def forward(self, x):\n skips = []\n seg_outputs = []\n # Encoder\n for d in range(len(self.conv_blocks_context) - 1):\n x = self.conv_blocks_context[d](x)\n skips.append(x)\n if not self.convolutional_pooling:\n x = self.td[d](x)\n\n x = self.conv_blocks_context[-1](x)\n # Decoder\n for u in range(len(self.tu)):\n x = self.tu[u](x)\n x = torch.cat((x, skips[-(u + 1)]), dim=1)\n x = self.conv_blocks_localization[u](x)\n seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))\n\n if self._deep_supervision and self.do_ds:\n return tuple([seg_outputs[-1]] + [i(j) for i, j in\n zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])\n else:\n return seg_outputs[-1]\n\n def embedding(self, x):\n # return the last embedding map (before last conv + softmax)\n skips = []\n # Encoder\n for d in range(len(self.conv_blocks_context) - 1):\n x = self.conv_blocks_context[d](x)\n skips.append(x)\n if not self.convolutional_pooling:\n x = self.td[d](x)\n x = self.conv_blocks_context[-1](x)\n # Decoder\n for u in range(len(self.tu)):\n x = self.tu[u](x)\n x = torch.cat((x, skips[-(u + 1)]), dim=1)\n x = self.conv_blocks_localization[u](x)\n return x\n\n @staticmethod\n def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n \"\"\"\n This only applies for num_conv_per_stage and convolutional_upsampling=True\n not real vram consumption. 
just a constant term to which the vram consumption will be approx proportional\n (+ offset for parameter storage)\n :param deep_supervision:\n :param patch_size:\n :param num_pool_per_axis:\n :param base_num_features:\n :param max_num_features:\n :param num_modalities:\n :param num_classes:\n :param pool_op_kernel_sizes:\n :return:\n \"\"\"\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # print(p, map_size, num_feat, tmp)\n return tmp\n" ]
[ [ "numpy.max", "numpy.array", "numpy.zeros_like", "numpy.zeros", "numpy.sum", "numpy.ones", "numpy.load", "numpy.min", "numpy.logical_and", "scipy.ndimage.measurements.label", "numpy.where", "numpy.argmax" ], [ "numpy.array", "torch.cat", "torch.nn.ModuleList", "numpy.round", "torch.nn.functional.interpolate", "numpy.prod", "numpy.vstack" ] ]
SudoHead/cs231n.github.io
[ "652285518ff5ed8c02503bac6cb24aaea0d6ff75" ]
[ "assignments/2019/assignment1/cs231n/data_utils.py" ]
[ "from __future__ import print_function\n\nfrom builtins import range\nfrom six.moves import cPickle as pickle\nimport numpy as np\nimport os\n\n# scipy.misc.imread is deprecated, so use imageio.imread\nfrom scipy.misc import imread\nimport platform\n\ndef load_pickle(f):\n version = platform.python_version_tuple()\n if version[0] == '2':\n return pickle.load(f)\n elif version[0] == '3':\n return pickle.load(f, encoding='latin1')\n raise ValueError(\"invalid python version: {}\".format(version))\n\ndef load_CIFAR_batch(filename):\n \"\"\" load single batch of cifar \"\"\"\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y\n\ndef load_CIFAR10(ROOT):\n \"\"\" load all of cifar \"\"\"\n xs = []\n ys = []\n for b in range(1,6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte\n\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,\n subtract_mean=True):\n \"\"\"\n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for classifiers. These are the same steps as we used for the SVM, but\n condensed to a single function.\n \"\"\"\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = list(range(num_training, num_training + num_validation))\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = list(range(num_training))\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = list(range(num_test))\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n if subtract_mean:\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Transpose so that channels come first\n X_train = X_train.transpose(0, 3, 1, 2).copy()\n X_val = X_val.transpose(0, 3, 1, 2).copy()\n X_test = X_test.transpose(0, 3, 1, 2).copy()\n\n # Package data into a dictionary\n return {\n 'X_train': X_train, 'y_train': y_train,\n 'X_val': X_val, 'y_val': y_val,\n 'X_test': X_test, 'y_test': y_test,\n }\n\n\ndef load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):\n \"\"\"\n Load TinyImageNet. 
Each of TinyImageNet-100-A, TinyImageNet-100-B, and\n TinyImageNet-200 have the same directory structure, so this can be used\n to load any of them.\n\n Inputs:\n - path: String giving path to the directory to load.\n - dtype: numpy datatype used to load the data.\n - subtract_mean: Whether to subtract the mean training image.\n\n Returns: A dictionary with the following entries:\n - class_names: A list where class_names[i] is a list of strings giving the\n WordNet names for class i in the loaded dataset.\n - X_train: (N_tr, 3, 64, 64) array of training images\n - y_train: (N_tr,) array of training labels\n - X_val: (N_val, 3, 64, 64) array of validation images\n - y_val: (N_val,) array of validation labels\n - X_test: (N_test, 3, 64, 64) array of testing images.\n - y_test: (N_test,) array of test labels; if test labels are not available\n (such as in student code) then y_test will be None.\n - mean_image: (3, 64, 64) array giving mean training image\n \"\"\"\n # First load wnids\n with open(os.path.join(path, 'wnids.txt'), 'r') as f:\n wnids = [x.strip() for x in f]\n\n # Map wnids to integer labels\n wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}\n\n # Use words.txt to get names for each class\n with open(os.path.join(path, 'words.txt'), 'r') as f:\n wnid_to_words = dict(line.split('\\t') for line in f)\n for wnid, words in wnid_to_words.items():\n wnid_to_words[wnid] = [w.strip() for w in words.split(',')]\n class_names = [wnid_to_words[wnid] for wnid in wnids]\n\n # Next load training data.\n X_train = []\n y_train = []\n for i, wnid in enumerate(wnids):\n if (i + 1) % 20 == 0:\n print('loading training data for synset %d / %d'\n % (i + 1, len(wnids)))\n # To figure out the filenames we need to open the boxes file\n boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)\n with open(boxes_file, 'r') as f:\n filenames = [x.split('\\t')[0] for x in f]\n num_images = len(filenames)\n\n X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)\n y_train_block = wnid_to_label[wnid] * \\\n np.ones(num_images, dtype=np.int64)\n for j, img_file in enumerate(filenames):\n img_file = os.path.join(path, 'train', wnid, 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n ## grayscale file\n img.shape = (64, 64, 1)\n X_train_block[j] = img.transpose(2, 0, 1)\n X_train.append(X_train_block)\n y_train.append(y_train_block)\n\n # We need to concatenate all training data\n X_train = np.concatenate(X_train, axis=0)\n y_train = np.concatenate(y_train, axis=0)\n\n # Next load validation data\n with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:\n img_files = []\n val_wnids = []\n for line in f:\n img_file, wnid = line.split('\\t')[:2]\n img_files.append(img_file)\n val_wnids.append(wnid)\n num_val = len(img_files)\n y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])\n X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)\n for i, img_file in enumerate(img_files):\n img_file = os.path.join(path, 'val', 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n img.shape = (64, 64, 1)\n X_val[i] = img.transpose(2, 0, 1)\n\n # Next load test images\n # Students won't have test labels, so we need to iterate over files in the\n # images directory.\n img_files = os.listdir(os.path.join(path, 'test', 'images'))\n X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)\n for i, img_file in enumerate(img_files):\n img_file = os.path.join(path, 'test', 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n img.shape = 
(64, 64, 1)\n X_test[i] = img.transpose(2, 0, 1)\n\n y_test = None\n y_test_file = os.path.join(path, 'test', 'test_annotations.txt')\n if os.path.isfile(y_test_file):\n with open(y_test_file, 'r') as f:\n img_file_to_wnid = {}\n for line in f:\n line = line.split('\\t')\n img_file_to_wnid[line[0]] = line[1]\n y_test = [wnid_to_label[img_file_to_wnid[img_file]]\n for img_file in img_files]\n y_test = np.array(y_test)\n\n mean_image = X_train.mean(axis=0)\n if subtract_mean:\n X_train -= mean_image[None]\n X_val -= mean_image[None]\n X_test -= mean_image[None]\n\n return {\n 'class_names': class_names,\n 'X_train': X_train,\n 'y_train': y_train,\n 'X_val': X_val,\n 'y_val': y_val,\n 'X_test': X_test,\n 'y_test': y_test,\n 'class_names': class_names,\n 'mean_image': mean_image,\n }\n\n\ndef load_models(models_dir):\n \"\"\"\n Load saved models from disk. This will attempt to unpickle all files in a\n directory; any files that give errors on unpickling (such as README.txt)\n will be skipped.\n\n Inputs:\n - models_dir: String giving the path to a directory containing model files.\n Each model file is a pickled dictionary with a 'model' field.\n\n Returns:\n A dictionary mapping model file names to models.\n \"\"\"\n models = {}\n for model_file in os.listdir(models_dir):\n with open(os.path.join(models_dir, model_file), 'rb') as f:\n try:\n models[model_file] = load_pickle(f)['model']\n except pickle.UnpicklingError:\n continue\n return models\n\n\ndef load_imagenet_val(num=None):\n \"\"\"Load a handful of validation images from ImageNet.\n\n Inputs:\n - num: Number of images to load (max of 25)\n\n Returns:\n - X: numpy array with shape [num, 224, 224, 3]\n - y: numpy array of integer image labels, shape [num]\n - class_names: dict mapping integer label to class name\n \"\"\"\n imagenet_fn = 'cs231n/datasets/imagenet_val_25.npz'\n if not os.path.isfile(imagenet_fn):\n print('file %s not found' % imagenet_fn)\n print('Run the following:')\n print('cd cs231n/datasets')\n print('bash get_imagenet_val.sh')\n assert False, 'Need to download imagenet_val_25.npz'\n f = np.load(imagenet_fn)\n X = f['X']\n y = f['y']\n class_names = f['label_map'].item()\n if num is not None:\n X = X[:num]\n y = y[:num]\n return X, y, class_names\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.ones", "numpy.load", "numpy.mean", "scipy.misc.imread" ] ]
WildbookOrg/wbia-deprecate-tpl-brambox
[ "9aa6a69f706d0653a65520c696a7cd66715b6a37" ]
[ "brambox/boxes/statistics/pr.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright EAVISE\n# Author: Maarten Vandersteegen\n# Author: Tanguy Ophoff\n#\n# Functions for generating PR-curve values and calculating average precision\n#\n\nimport math\nfrom statistics import mean\nimport numpy as np\nimport scipy.interpolate\n\nfrom .util import *\n\n__all__ = ['pr', 'ap']\n\n\ndef pr(detections, ground_truth, overlap_threshold=0.5):\n \"\"\" Compute a list of precision recall values that can be plotted into a graph.\n\n Args:\n detections (dict): Detection objects per image\n ground_truth (dict): Annotation objects per image\n overlap_threshold (Number, optional): Minimum iou threshold for true positive; Default **0.5**\n\n Returns:\n tuple: **[precision_values]**, **[recall_values]**\n \"\"\"\n tps, fps, num_annotations = match_detections(\n detections, ground_truth, overlap_threshold\n )\n\n precision = []\n recall = []\n for tp, fp in zip(tps, fps):\n recall.append(tp / num_annotations)\n precision.append(tp / (fp + tp))\n\n return precision, recall\n\n\ndef ap(precision, recall, num_of_samples=100):\n \"\"\" Compute the average precision from a given pr-curve.\n The average precision is defined as the area under the curve.\n\n Args:\n precision (list): Precision values\n recall (list): Recall values\n num_of_samples (int, optional): Number of samples to take from the curve to measure the average precision; Default **100**\n\n Returns:\n Number: average precision\n \"\"\"\n if len(precision) > 1 and len(recall) > 1:\n p = np.array(precision)\n r = np.array(recall)\n p_start = p[np.argmin(r)]\n samples = np.arange(0.0, 1.0, 1.0 / num_of_samples)\n interpolated = scipy.interpolate.interp1d(\n r, p, fill_value=(p_start, 0.0), bounds_error=False\n )(samples)\n avg = sum(interpolated) / len(interpolated)\n elif len(precision) > 0 and len(recall) > 0:\n # 1 point on PR: AP is box between (0,0) and (p,r)\n avg = precision[0] * recall[0]\n else:\n avg = float('nan')\n\n return avg\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.argmin" ] ]
Utsav-Patel/The-Imitation-Game
[ "09dfaffdf917c1adfb1d8cd3e09a216b9a014e52" ]
[ "models/project2/dense/20x20/model1.py" ]
[ "import pickle\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\n\nfrom constants import CHECKPOINT_FILEPATH, PROJECT2_DATA_PATH, PROJECT2_VALIDATION_PATH\nfrom model_architectures import create_model_project2_dense_20x20\nfrom DataGenerator import DataGenerator\n\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.compat.v1.Session(config=config)\n\n\ndef prepare_dataset(path):\n open_file = open(path, \"rb\")\n loaded_list = pickle.load(open_file)\n open_file.close()\n\n print(\"Successfully loaded data from pickle file\", path)\n\n input_list = list()\n output_list = list()\n\n for dct in loaded_list:\n input_list.append({'input': dct['input'], 'sensed': dct['sensed'], 'current_pos': dct['current_pos']})\n output_list.append(dct['output'])\n\n # input_numpy = np.array(input_list)\n # print(input_numpy.shape)\n # # input_numpy = input_numpy.reshape(input_numpy.shape[0], -1)\n\n output_numpy = np.array(output_list)\n output_numpy = output_numpy.reshape(output_numpy.shape[0])\n output_numpy = to_categorical(output_numpy)\n\n return input_list, output_numpy\n\n\n# print(\"Input shape\", input_numpy.shape)\n# print(\"Output shape\", output_numpy.shape)\n# print('Starting training')\n\nX_train, y_train = prepare_dataset(PROJECT2_DATA_PATH)\nX_val, y_val = prepare_dataset(PROJECT2_VALIDATION_PATH)\n\nX_val, X_test, y_val, y_test = train_test_split(X_val, y_val, test_size=0.50, random_state=81)\n\n# print(\"X train shape\", X_train.shape)\n# print(\"y train shape\", y_train.shape)\n# print(\"X validation shape\", X_val.shape)\n# print(\"y validation shape\", y_val.shape)\n# print(\"X test shape\", X_test.shape)\n# print(\"y test shape\", y_test.shape)\n\ntraining_generator = DataGenerator(X_train, y_train)\nvalidation_generator = DataGenerator(X_val, y_val)\ntesting_generator = DataGenerator(X_test, y_test)\n\nmodel = create_model_project2_dense_20x20()\nmodel.summary()\n\nmodel_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=CHECKPOINT_FILEPATH,\n verbose=1,\n save_weights_only=True,\n monitor='val_accuracy',\n save_best_only=False,\n save_freq='epoch'\n)\n\nhistory = model.fit(training_generator, epochs=20, validation_data=validation_generator, use_multiprocessing=True,\n workers=75, callbacks=[model_checkpoint_callback])\n\nprint(history.history)\nmodel.evaluate(testing_generator, verbose=2)\n" ]
[ [ "tensorflow.keras.utils.to_categorical", "numpy.array", "tensorflow.compat.v1.ConfigProto", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.compat.v1.Session", "sklearn.model_selection.train_test_split" ] ]
Hotpotfish/pysc2
[ "3d7f7ffc01a50ab69d435b65c892cd0bc11265a8", "3d7f7ffc01a50ab69d435b65c892cd0bc11265a8" ]
[ "pysc2/agents/myAgent/myAgent_7/net/lenet.py", "pysc2/agents/myAgent/myAgent_5/macro_operation.py" ]
[ "import tensorflow as tf\n\n\nclass Lenet():\n\n def __init__(self, mu, sigma, learning_rate, action_dim, parameterdim, statedim, name):\n self.mu = mu\n self.sigma = sigma\n self.learning_rate = learning_rate\n\n self.action_dim = action_dim\n self.parameterdim = parameterdim\n self.statedim = statedim\n\n self.name = name\n\n self._build_graph()\n\n def _build_graph(self):\n self._setup_placeholders_graph()\n self._build_network_graph(self.name)\n self._compute_loss_graph()\n # self._compute_acc_graph()\n self._create_train_op_graph()\n self.merged_summary = tf.summary.merge_all()\n\n def _setup_placeholders_graph(self):\n self.action_input = tf.placeholder(\"float\", shape=[None, self.action_dim + self.parameterdim], name=self.name + '_' + 'action_input')\n self.y_input = tf.placeholder(\"float\", shape=[None, 1 + self.parameterdim], name=self.name + '_' + 'y_input')\n self.state_input = tf.placeholder(\"float\", shape=self.statedim, name=self.name + '_' + 'state_input')\n\n def _cnn_layer(self, scope_name, W_name, b_name, x, filter_shape, conv_strides, padding_tag='VALID'):\n with tf.variable_scope(scope_name):\n conv_W = tf.get_variable(W_name,\n dtype=tf.float32,\n initializer=tf.truncated_normal(shape=filter_shape, mean=self.mu,\n stddev=self.sigma))\n conv_b = tf.get_variable(b_name,\n dtype=tf.float32,\n initializer=tf.zeros(filter_shape[3]))\n conv = tf.nn.conv2d(x, conv_W,\n strides=conv_strides,\n padding=padding_tag) + conv_b\n\n return conv\n\n def _pooling_layer(self, scope_name, x, pool_ksize, pool_strides, padding_tag='VALID'):\n with tf.variable_scope(scope_name):\n pool = tf.nn.avg_pool(x, pool_ksize, pool_strides, padding=padding_tag)\n return pool\n\n def _fully_connected_layer(self, scope_name, W_name, b_name, x, W_shape):\n with tf.variable_scope(scope_name):\n x = tf.reshape(x, [-1, W_shape[0]])\n w = tf.get_variable(W_name,\n dtype=tf.float32,\n initializer=tf.truncated_normal(shape=W_shape, mean=self.mu,\n stddev=self.sigma))\n b = tf.get_variable(b_name,\n dtype=tf.float32,\n initializer=tf.zeros(W_shape[1]))\n\n r = tf.add(tf.matmul(x, w), b)\n\n return r\n\n def _build_network_graph(self, scope_name):\n with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):\n # 28 * 28 * 6\n self.conv1 = self._cnn_layer('layer_1_conv', 'conv_w', 'conv_b', self.state_input, (5, 5, self.statedim[3], 6), [1, 1, 1, 1])\n # 14 * 14 * 6\n self.pool1 = self._pooling_layer('layer_1_pooling', self.conv1, [1, 2, 2, 1], [1, 2, 2, 1])\n\n # 10 * 10 * 16\n self.conv2 = self._cnn_layer('layer_2_conv', 'conv_w', 'conv_b', self.pool1, (5, 5, 6, 16), [1, 1, 1, 1])\n\n # 5 * 5 * 16\n self.pool2 = self._pooling_layer('layer_2_pooling', self.conv2, [1, 2, 2, 1], [1, 2, 2, 1])\n\n # w.shape=[5 * 5 * 16, 120]\n self.fc1 = self._fully_connected_layer('full_connected1', 'full_connected_w', 'full_connected_b',\n self.pool2, (self.pool2._shape[1] * self.pool2._shape[2] * self.pool2._shape[3], 120))\n\n # w.shape=[120, 84]\n self.fc2 = self._fully_connected_layer('full_connected2', 'full_connected_w',\n 'full_connected_b',\n self.fc1, (120, 84))\n # w.shape=[84, 10]\n self.logits = self._fully_connected_layer('full_connected3', 'full_connected_w', 'full_connected_b',\n self.fc2, (84, self.action_dim + self.parameterdim))\n\n self.Q_value = tf.nn.softmax(self.logits)\n tf.summary.histogram(\"Q_value\", self.Q_value)\n\n def _compute_loss_graph(self):\n with tf.name_scope(self.name + \"_loss_function\"):\n self.Q_action = tf.reduce_sum(tf.multiply(self.Q_value, self.action_input))\n self.loss = 
tf.reduce_mean(tf.square(self.y_input - self.Q_action))\n # tf.summary.scalar(self.name + \"_loss_function\", self.loss)\n\n def _compute_acc_graph(self):\n with tf.name_scope(self.name + \"_acc_function\"):\n self.accuracy = \\\n tf.metrics.accuracy(labels=tf.argmax(self.y, axis=1), predictions=tf.argmax(self.y_predicted, axis=1))[\n 1]\n tf.summary.scalar(\"accuracy\", self.accuracy)\n\n def _create_train_op_graph(self):\n self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n", "import random\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\nfrom pysc2.lib import units\nfrom queue import Queue\nimport numpy as np\n\n\n\nmapSzie = 128\n\n\n# def plt_soldiers(soldiers, color):\n# for i in range(len(soldiers)):\n# plt.scatter(soldiers[i].x, soldiers[i].y, c=color, cmap='coolwarm')\n\n\ndef automatic_formation(obs):\n soldiers = get_my_units_by_type(obs, units.Terran.Marine)\n # plt_soldiers(soldiers, 1000.0)\n # plt.show()\n\n soldier_count = len(soldiers)\n\n combat_teams = []\n\n loop = 0\n\n while soldier_count != 0:\n\n # 战斗小组人数\n combat_team_count = random.randint(1, soldier_count)\n\n combat_team = []\n\n # 筛选人员\n for i in range(combat_team_count):\n random_index = random.randint(0, len(soldiers) - 1)\n\n soldier = soldiers.pop(random_index)\n\n combat_team.append(soldier)\n\n print('soldier:' + str(soldier.tag) + ' ')\n\n # plt_soldiers(combat_team, float(loop * 100))\n combat_teams.append(combat_team)\n\n print('are in combat_team_' + str(loop))\n print('-------------------------------')\n loop += 1\n soldier_count -= combat_team_count\n # plt.show()\n\n print()\n print()\n print()\n\n return combat_teams\n\n\ndef chooseARandomPlace(input_x, input_y):\n offset = 20\n add_y = random.randint(-offset, offset)\n add_x = random.randint(-offset, offset)\n\n if input_x + add_x >= mapSzie:\n\n outx = mapSzie\n\n elif input_x + add_x < 0:\n outx = 0\n\n else:\n outx = input_x + add_x\n\n if input_y + add_y >= mapSzie:\n\n outy = mapSzie\n\n elif input_y + add_y < 0:\n outy = 0\n\n else:\n outy = input_y + add_y\n\n return (outx, outy)\n\n\ndef get_my_units_by_type(obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.alliance == features.PlayerRelative.SELF]\n\n\ndef get_enemy_units_by_type(obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.alliance == features.PlayerRelative.ENEMY]\n\n\ndef get_my_completed_units_by_type(obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.build_progress == 100\n and unit.alliance == features.PlayerRelative.SELF]\n\n\ndef get_enemy_completed_units_by_type(obs, unit_type):\n return [unit for unit in obs.observation.raw_units\n if unit.unit_type == unit_type\n and unit.build_progress == 100\n and unit.alliance == features.PlayerRelative.ENEMY]\n\n\ndef find_any_enemy(obs):\n return [unit for unit in obs.observation.raw_units\n if unit.alliance == features.PlayerRelative.ENEMY]\n\n\ndef get_distances(obs, units, xy):\n units_xy = [(unit.x, unit.y) for unit in units]\n return np.linalg.norm(np.array(units_xy) - np.array(xy), axis=1)\n\n\ndef harvest_minerals(obs):\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n idle_scvs = [scv for scv in scvs if scv.order_length == 0]\n if len(idle_scvs) > 0:\n mineral_patches = [unit for unit in obs.observation.raw_units\n if unit.unit_type in [\n units.Neutral.BattleStationMineralField,\n 
units.Neutral.BattleStationMineralField750,\n units.Neutral.LabMineralField,\n units.Neutral.LabMineralField750,\n units.Neutral.MineralField,\n units.Neutral.MineralField750,\n units.Neutral.PurifierMineralField,\n units.Neutral.PurifierMineralField750,\n units.Neutral.PurifierRichMineralField,\n units.Neutral.PurifierRichMineralField750,\n units.Neutral.RichMineralField,\n units.Neutral.RichMineralField750\n ]]\n if len(mineral_patches) == 0:\n return actions.RAW_FUNCTIONS.no_op()\n scv = random.choice(idle_scvs)\n distances = get_distances(obs, mineral_patches, (scv.x, scv.y))\n mineral_patch = mineral_patches[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n \"now\", scv.tag, mineral_patch.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef harvest_VespeneGeyser(obs):\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n # idle_scvs = [scv for scv in scvs if scv.order_length == 0]\n VespeneGeyser_patches = get_my_completed_units_by_type(obs, units.Terran.Refinery) \\\n + get_my_completed_units_by_type(obs, units.Terran.RefineryRich)\n if len(scvs) > 0 and len(VespeneGeyser_patches) > 0:\n for i in range(len(VespeneGeyser_patches)):\n if VespeneGeyser_patches[i].assigned_harvesters < VespeneGeyser_patches[i].ideal_harvesters:\n scv = random.choice(scvs)\n return actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n \"now\", scv.tag, VespeneGeyser_patches[i].tag)\n\n # scv = random.choice(scvs)\n # distances = get_distances(obs, VespeneGeyser_patches, (scv.x, scv.y))\n # VespeneGeyser_patch = VespeneGeyser_patches[np.argmin(distances)]\n # return actions.RAW_FUNCTIONS.Harvest_Gather_unit(\n # \"now\", scv.tag, VespeneGeyser_patch.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef build_refinery(obs):\n commandCenters = get_my_units_by_type(obs, units.Terran.CommandCenter)\n if len(commandCenters) > 0:\n commandCenter = commandCenters[random.randint(0, len(commandCenters) - 1)]\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n if (obs.observation.player.minerals >= 75 and len(scvs) > 0):\n VespeneGeyser_patches = [unit for unit in obs.observation.raw_units\n if unit.unit_type in [\n units.Neutral.ProtossVespeneGeyser,\n units.Neutral.PurifierVespeneGeyser,\n units.Neutral.RichVespeneGeyser,\n units.Neutral.ShakurasVespeneGeyser,\n units.Neutral.VespeneGeyser,\n ]]\n if len(VespeneGeyser_patches) == 0:\n return actions.RAW_FUNCTIONS.no_op()\n\n refineries = get_my_units_by_type(obs, units.Terran.Refinery)\n\n if len(refineries) == 0:\n scv = random.choice(scvs)\n distances = get_distances(obs, VespeneGeyser_patches, (commandCenter.x, commandCenter.y))\n VespeneGeyser_patch = VespeneGeyser_patches[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Refinery_pt(\n \"now\", scv.tag, VespeneGeyser_patch.tag)\n elif len(refineries) < len(commandCenters) * 2:\n for i in range(len(refineries)):\n for j in range(len(VespeneGeyser_patches)):\n if refineries[i].x == VespeneGeyser_patches[j].x and \\\n refineries[i].y == VespeneGeyser_patches[j].y:\n VespeneGeyser_patches.pop(j)\n j -= 1\n break\n scv = random.choice(scvs)\n distances = get_distances(obs, VespeneGeyser_patches, (commandCenter.x, commandCenter.y))\n VespeneGeyser_patch = VespeneGeyser_patches[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Refinery_pt(\n \"now\", scv.tag, VespeneGeyser_patch.tag)\n\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef build_supply_depot(obs):\n commandCenters = get_my_units_by_type(obs, units.Terran.CommandCenter)\n if len(commandCenters) > 0:\n commandCenter = 
commandCenters[random.randint(0, len(commandCenters) - 1)]\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n if (obs.observation.player.minerals >= 100 and len(scvs) > 0 and obs.observation.player.food_cap < 200):\n supply_depot_xy = chooseARandomPlace(commandCenter.x, commandCenter.y)\n distances = get_distances(obs, scvs, supply_depot_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_SupplyDepot_pt(\"now\", scv.tag, supply_depot_xy)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef build_barracks(obs):\n commandCenters = get_my_units_by_type(obs, units.Terran.CommandCenter)\n if len(commandCenters) > 0:\n completed_supply_depots = get_my_completed_units_by_type(\n obs, units.Terran.SupplyDepot)\n\n commandCenter = commandCenters[random.randint(0, len(commandCenters) - 1)]\n scvs = get_my_units_by_type(obs, units.Terran.SCV)\n if (len(completed_supply_depots) > 0 and\n obs.observation.player.minerals >= 150 and len(scvs) > 0):\n barracks_xy = chooseARandomPlace(commandCenter.x, commandCenter.y)\n distances = get_distances(obs, scvs, barracks_xy)\n scv = scvs[np.argmin(distances)]\n return actions.RAW_FUNCTIONS.Build_Barracks_pt(\n \"now\", scv.tag, barracks_xy)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef train_scv(obs):\n completed_commandCenters = get_my_completed_units_by_type(obs, units.Terran.CommandCenter)\n free_supply = (obs.observation.player.food_cap - obs.observation.player.food_used)\n if (len(completed_commandCenters) > 0 and obs.observation.player.minerals >= 50 and free_supply > 0):\n commandCenters = get_my_units_by_type(obs, units.Terran.CommandCenter)\n commandCenter = commandCenters[random.randint(0, len(commandCenters) - 1)]\n if commandCenter.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_SCV_quick(\"now\", commandCenter.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n\ndef train_marine(obs):\n completed_barrackses = get_my_completed_units_by_type(\n obs, units.Terran.Barracks)\n free_supply = (obs.observation.player.food_cap -\n obs.observation.player.food_used)\n if (len(completed_barrackses) > 0 and obs.observation.player.minerals >= 100\n and free_supply > 0):\n barracks = get_my_units_by_type(obs, units.Terran.Barracks)\n barrack = barracks[random.randint(0, len(barracks) - 1)]\n if barrack.order_length < 5:\n return actions.RAW_FUNCTIONS.Train_Marine_quick(\"now\", barrack.tag)\n return actions.RAW_FUNCTIONS.no_op()\n\n\n#\n# def attack(obs):\n# marines = get_my_units_by_type(obs, units.Terran.Marine)\n# if len(marines) > 0:\n# enmies = find_any_enemy(obs)\n# attack_orders = []\n# if len(enmies) > 0:\n# for i in range(len(marines)):\n# marine_xy = (marines[i].x, marines[i].y)\n# distances = get_distances(obs, enmies, marine_xy)\n# enmy = enmies[np.argmin(distances)]\n# attack_orders.append(actions.RAW_FUNCTIONS.Attack_unit(\"now\", marines[i].tag, enmy.tag))\n# return attack_orders\n#\n# else:\n# for i in range(len(marines)):\n# random_x = random.randint(0, mapSzie - 1)\n# random_y = random.randint(0, mapSzie - 1)\n# attack_orders.append(actions.RAW_FUNCTIONS.Move_pt(\"queued\", marines[i].tag, (random_x, random_y)))\n# return attack_orders\n#\n# return actions.RAW_FUNCTIONS.no_op()\n\ndef attack(obs):\n combat_teams = automatic_formation(obs)\n if len(combat_teams) > 0:\n enmies = find_any_enemy(obs)\n attack_orders = []\n if len(enmies) > 0:\n for i in range(len(combat_teams)):\n marine_xy = (combat_teams[i][0].x, combat_teams[i][0].y)\n distances = get_distances(obs, enmies, marine_xy)\n enmy = 
enmies[np.argmin(distances)]\n for j in range(len(combat_teams[i])):\n attack_orders.append(actions.RAW_FUNCTIONS.Attack_unit(\"now\", combat_teams[i][j].tag, enmy.tag))\n return attack_orders\n\n else:\n for i in range(len(combat_teams)):\n random_x = random.randint(0, mapSzie - 1)\n random_y = random.randint(0, mapSzie - 1)\n for j in range(len(combat_teams[i])):\n attack_orders.append(\n actions.RAW_FUNCTIONS.Move_pt(\"queued\", combat_teams[i][j].tag, (random_x, random_y)))\n return attack_orders\n\n return actions.RAW_FUNCTIONS.no_op()\n" ]
[ [ "tensorflow.multiply", "tensorflow.zeros", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "tensorflow.summary.scalar", "tensorflow.summary.histogram", "tensorflow.argmax", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.truncated_normal", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.nn.softmax", "tensorflow.name_scope", "tensorflow.summary.merge_all", "tensorflow.square", "tensorflow.nn.avg_pool" ], [ "numpy.array", "numpy.argmin" ] ]
zhuboli/alf
[ "b357565638c9336ebd88cecb9766a17d72d5d0c3", "38a3621337a030f74bb3944d7695e7642e777e10" ]
[ "alf/environments/suite_carla.py", "alf/environments/mario_wrappers.py" ]
[ "# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"CarlaEnvironment suite.\n\nTo use this, there are two ways:\n\n1. Run the code within docker image horizonrobotics/alf:0.0.3-carla\n Both `Docker <https://docs.docker.com/engine/install/ubuntu/>`_ and\n `Nvidia-Docker2 <https://github.com/NVIDIA/nvidia-docker>`_ need to be installed.\n\n2. Install carla:\n\n.. code-block:: bash\n\n wget https://carla-releases.s3.eu-west-3.amazonaws.com/Linux/CARLA_0.9.9.tar.gz\n mkdir carla\n tar zxf CARLA_0.9.9.tar.gz -C carla\n cd carla/Import\n wget https://carla-releases.s3.eu-west-3.amazonaws.com/Linux/AdditionalMaps_0.9.9.tar.gz\n cd ..\n ./ImportAssert.sh\n easy_install PythonAPI/carla/dist/carla-0.9.9-py3.7-linux-x86_64.egg\n\nMake sure you are using python3.7\n\n\"\"\"\n\nfrom collections import OrderedDict\nfrom absl import logging\nimport gin\nimport math\nimport numpy as np\nimport os\nimport random\nimport subprocess\nimport sys\nimport time\nimport torch\n\ntry:\n import carla\nexcept ImportError:\n carla = None\n\nimport alf\nimport alf.data_structures as ds\nfrom alf.utils import common\nfrom .suite_socialbot import _get_unused_port\nfrom .alf_environment import AlfEnvironment\nfrom .carla_sensors import (CameraSensor, CollisionSensor, GnssSensor,\n IMUSensor, LaneInvasionSensor, NavigationSensor,\n RadarSensor, World, MINIMUM_RENDER_WIDTH,\n MINIMUM_RENDER_HEIGHT)\n\n\ndef is_available():\n return carla is not None\n\n\ndef geo_distance(loc1, loc2):\n \"\"\"\n Args:\n loc1 (np.array): [latitude, longitude, altitude]. 
The units for altitude\n is meter.\n loc2 (np.array):\n Returns:\n float: distance in meters\n \"\"\"\n earth_radius = 6371 * 1000\n d2r = math.pi / 180\n\n d = loc1 - loc2\n dlat = d[0] * d2r\n dlon = d[1] * d2r\n lat1 = loc1[0] * d2r\n lat2 = loc2[0] * d2r\n a = np.sin(\n 0.5 * dlat)**2 + np.sin(0.5 * dlon)**2 * np.cos(lat1) * np.cos(lat2)\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))\n c = earth_radius * c\n return np.sqrt(c * c + d[2] * d[2])\n\n\ndef _calculate_relative_position(self_transform, location):\n \"\"\"\n Args:\n self_transform (carla.Transform): transform of self actor\n location (np.ndarray): shape is [3] or [N, 3]\n Returns:\n np.ndarray: shape is same as location\n \"\"\"\n trans = self_transform\n self_loc = trans.location\n yaw = math.radians(trans.rotation.yaw)\n\n self_loc = np.array([self_loc.x, self_loc.y, self_loc.z])\n cos, sin = np.cos(yaw), np.sin(yaw)\n rot = np.array([[cos, -sin, 0.], [sin, cos, 0.], [0., 0., 1.]])\n return np.matmul(location - self_loc, rot).astype(np.float32)\n\n\ndef _calculate_relative_velocity(self_transform, velocity):\n \"\"\"\n Args:\n self_transform (carla.Transform): transform of self actor\n velocity (np.ndarray): shape is [3] or [N, 3]\n Returns:\n np.ndarray: shape is same as location\n \"\"\"\n trans = self_transform\n yaw = math.radians(trans.rotation.yaw)\n\n cos, sin = np.cos(yaw), np.sin(yaw)\n rot = np.array([[cos, -sin, 0.], [sin, cos, 0.], [0., 0., 1.]])\n return np.matmul(velocity, rot).astype(np.float32)\n\n\ndef _to_numpy_loc(loc: carla.Location):\n return np.array([loc.x, loc.y, loc.z])\n\n\[email protected](blacklist=['actor', 'alf_world'])\nclass Player(object):\n \"\"\"Player is a vehicle with some sensors.\n\n An episode terminates if it reaches one of the following situations:\n 1. the vehicle arrives at the goal.\n 2. the time exceeds ``route_length / min_speed``.\n 3. it get stuck because of a collision.\n\n At each step, the reward is given based on the following components:\n 1. Arriving goal: ``success_reward``\n 2. Moving in the navigation direction: the number of meters moved\n This moving reward can be either dense of sparse depending on the argument\n ``sparse_reward``.\n 3. Negative reward caused by collision: ``-min(max_collision_reward, max(epside_reward, 0))``\n\n Currently, the player has the these sensors: ``CollisionSensor``, ``GnssSensor``,\n ``IMUSensor``, ``CameraSensor``, ``LaneInvasionSensor`` , ``RadarSensor``,\n ``NavigationSensor``. 
See the documentation for these class for the definition\n the data generated by these sensors.\n \"\"\"\n\n # over all reward\n REWARD_OVERALL = 0\n\n # distance in meter for moving along route\n # If using sparse reward (`sparse_reward` is True), this reward is only given\n # about every `sparse_reward_interval` meters\n # If not using sparse reward, this reward is given every steps.\n REWARD_DISTANCE = 1\n\n # 0/1 valued indicating whether there is collision\n REWARD_COLLISION = 2\n\n # 0/1 valued indicating reaching goal\n REWARD_SUCCESS = 3\n\n # dimension of the reward vector\n REWARD_DIMENSION = 4\n\n def __init__(self,\n actor,\n alf_world,\n success_reward=100.,\n success_distance_thresh=5.0,\n max_collision_penalty=100.,\n max_stuck_at_collision_seconds=5.0,\n stuck_at_collision_distance=1.0,\n sparse_reward=False,\n sparse_reward_interval=10.,\n allow_negative_distance_reward=True,\n min_speed=5.,\n with_gnss_sensor=True,\n with_imu_sensor=True,\n with_camera_sensor=True,\n with_radar_sensor=True):\n \"\"\"\n Args:\n actor (carla.Actor): the carla actor object\n alf_world (Wolrd): the world containing the player\n success_reward (float): the reward for arriving the goal location.\n success_distance_thresh (float): success is achieved if the current\n location is with such distance of the goal\n max_collision_penalty (float): the maximum penalty (i.e. negative reward)\n for collision. We don't want the collision penalty to be too large\n if the player cannot even get enough positive moving reward. So the\n panalty is capped at ``max(0., episode_reward))``. Note that this\n reward is only given once at the first step of contiguous collisions.\n max_stuck_at_collision_seconds (float): the episode will end and is\n considerred as failure if the car is stuck at the collision for\n so many seconds,\n stuck_at_collision_distance (float): the car is considerred as being\n stuck at the collision if it is within such distance of the first\n collision location.\n sparse_reward (bool): If False, the distance reward is given at every\n step based on how much it moves along the navigation route. If\n True, the distance reward is only given after moving ``sparse_reward_distance``.\n sparse_reward_interval (float): the sparse reward is given after\n approximately every such distance along the route has been driven.\n allow_negative_distance_reward (True): whether to allow negative distance\n reward. If True, the agent will receive positive reward for moving\n ahead along the route, and negative rewad for moving back along\n the route. If False, the agent still receives positive reward for\n moving ahead along the route, but will not receive negative rewad\n for moving back along the route. Instead, the negative distance\n will be accumulated to the future distance reward. This may ease\n the learning if the right behavior is to temporarily go back along\n the route in order, for examle, to avoid obstacle.\n min_speed (float): unit is m/s. 
Failure if initial_distance / min_speed\n seconds passed\n with_gnss_sensor (bool): whether to use ``GnssSensor``.\n with_imu_sensor (bool): whether to use ``IMUSensor``.\n with_camera_sensor (bool): whether to use ``CameraSensor``.\n with_radar_sensor (bool): whether to use ``RadarSensor``.\n \"\"\"\n self._actor = actor\n self._alf_world = alf_world\n self._observation_sensors = {}\n\n self._collision_sensor = CollisionSensor(actor)\n self._observation_sensors['collision'] = self._collision_sensor\n\n if with_gnss_sensor:\n self._gnss_sensor = GnssSensor(actor)\n self._observation_sensors['gnss'] = self._gnss_sensor\n else:\n self._gnss_sensor = None\n\n if with_imu_sensor:\n self._imu_sensor = IMUSensor(actor)\n self._observation_sensors['imu'] = self._imu_sensor\n else:\n self._imu_sensor = None\n\n if with_camera_sensor:\n self._camera_sensor = CameraSensor(actor)\n self._observation_sensors['camera'] = self._camera_sensor\n else:\n self._camera_sensor = None\n\n self._lane_invasion_sensor = LaneInvasionSensor(actor)\n\n if with_radar_sensor:\n self._radar_sensor = RadarSensor(actor)\n self._observation_sensors['radar'] = self._radar_sensor\n else:\n self._radar_sensor = None\n\n self._navigation = NavigationSensor(actor, alf_world)\n self._observation_sensors['navigation'] = self._navigation\n\n self._success_reward = success_reward\n self._success_distance_thresh = success_distance_thresh\n self._min_speed = min_speed\n self._delta_seconds = actor.get_world().get_settings(\n ).fixed_delta_seconds\n self._max_collision_penalty = max_collision_penalty\n self._max_stuck_at_collision_frames = max_stuck_at_collision_seconds / self._delta_seconds\n self._stuck_at_collision_distance = stuck_at_collision_distance\n self._sparse_reward = sparse_reward\n self._sparse_reward_index_interval = int(\n max(1, sparse_reward_interval // self._alf_world.route_resolution))\n self._allow_negative_distance_reward = allow_negative_distance_reward\n\n self._observation_spec = dict()\n self._observation_desc = dict()\n for sensor_name, sensor in self._observation_sensors.items():\n self._observation_spec[sensor_name] = sensor.observation_spec()\n self._observation_desc[sensor_name] = sensor.observation_desc()\n self._observation_spec['goal'] = alf.TensorSpec([3])\n self._observation_spec['velocity'] = alf.TensorSpec([3])\n\n # UE4 coordinate system is right handed:\n # https://forums.unrealengine.com/development-discussion/c-gameplay-programming/103787-ue4-coordinate-system-not-right-handed\n self._observation_desc['goal'] = (\n \"Target location relative to the vehicle coordinate system in \"\n \"meters. X axis: front, Y axis: right, Z axis: up. 
Only the \"\n \"rotation around Z axis is taken into account when calculating the \"\n \"vehicle's coordinate system.\")\n self._observation_desc['navigation'] = (\n 'Relative positions of the future waypoints in the route')\n self._observation_desc[\n 'velocity'] = \"3D Velocity relative to self coordinate in m/s\"\n self._info_spec = OrderedDict(\n success=alf.TensorSpec(()), collision=alf.TensorSpec(()))\n\n self._control = carla.VehicleControl()\n self.reset()\n\n # for rendering\n self._surface = None\n self._font = None\n self._clock = None\n\n def reset(self):\n \"\"\"Reset the player location and goal.\n\n Use ``carla.Client.apply_batch_sync()`` to actually reset.\n\n Returns:\n list[carla.command]:\n \"\"\"\n\n wp = random.choice(self._alf_world.get_waypoints())\n goal_loc = wp.transform.location\n self._goal_location = np.array([goal_loc.x, goal_loc.y, goal_loc.z],\n dtype=np.float32)\n\n forbidden_locations = []\n for v in self._alf_world.get_actors():\n if v.id == self._actor.id:\n continue\n forbidden_locations.append(\n self._alf_world.get_actor_location(v.id))\n\n # find a waypoint far enough from other vehicles\n ok = False\n i = 0\n while not ok and i < 100:\n wp = random.choice(self._alf_world.get_waypoints())\n loc = wp.transform.location\n ok = True\n for other_loc in forbidden_locations:\n if loc.distance(other_loc) < 10.:\n ok = False\n break\n i += 1\n assert ok, \"Fail to find new position\"\n # loc.z + 0.27531 to avoid Z-collision, see Carla documentation for\n # carla.Map.get_spawn_points(). The value used by carla is slightly\n # smaller: 0.27530714869499207\n loc = carla.Location(loc.x, loc.y, loc.z + 0.3)\n\n commands = [\n carla.command.ApplyTransform(\n self._actor, carla.Transform(loc, wp.transform.rotation)),\n carla.command.ApplyVelocity(self._actor, carla.Vector3D()),\n carla.command.ApplyAngularVelocity(self._actor, carla.Vector3D())\n ]\n\n self._max_frame = None\n self._done = False\n self._prev_location = loc\n self._prev_action = np.zeros(\n self.action_spec().shape, dtype=np.float32)\n self._alf_world.update_actor_location(self._actor.id, loc)\n\n self._route_length = self._navigation.set_destination(goal_loc)\n\n self._prev_collision = False # whether there is collision in the previous frame\n self._collision = False # whether there is colliion in the current frame\n self._collision_loc = None # the location of the car when it starts to have collition\n\n # The intermediate goal for sparse reward\n self._intermediate_goal_index = min(self._sparse_reward_index_interval,\n self._navigation.num_waypoints - 1)\n\n # The location of the car when the intermediate goal is set\n self._intermediate_start = _to_numpy_loc(loc)\n\n self._episode_reward = 0.\n self._unrecorded_distance_reward = 0.\n self._is_first_step = True\n\n return commands\n\n def destroy(self):\n \"\"\"Get the commands for destroying the player.\n\n Use carla.Client.apply_batch_sync() to actually destroy the sensor.\n\n Returns:\n list[carla.command]:\n \"\"\"\n commands = []\n for sensor in self._observation_sensors.values():\n commands.extend(sensor.destroy())\n commands.extend(self._lane_invasion_sensor.destroy())\n commands.append(carla.command.DestroyActor(self._actor))\n if self._surface is not None:\n import pygame\n pygame.quit()\n\n return commands\n\n def observation_spec(self):\n \"\"\"Get the observation spec.\n\n Returns:\n nested TensorSpec:\n \"\"\"\n return self._observation_spec\n\n def observation_desc(self):\n \"\"\"Get the description about the observation.\n\n 
Returns:\n nested str: each str corresponds to one TensorSpec from\n ``observatin_spec()``.\n \"\"\"\n return self._observation_desc\n\n def action_spec(self):\n \"\"\"Get the action spec.\n\n The action is a 4-D vector of [throttle, steer, brake, reverse], where\n throttle is in [-1.0, 1.0] (negative value is same as zero), steer is in\n [-1.0, 1.0], brake is in [-1.0, 1.0] (negative value is same as zero),\n and reverse is interpreted as a boolean value with values greater than\n 0.5 corrsponding to True.\n\n Returns:\n nested BoundedTensorSpec:\n \"\"\"\n return alf.BoundedTensorSpec([4],\n minimum=[-1., -1., -1., 0.],\n maximum=[1., 1., 1., 1.])\n\n def info_spec(self):\n \"\"\"Get the info spec.\"\"\"\n return self._info_spec\n\n def action_desc(self):\n \"\"\"Get the description about the action.\n\n Returns:\n nested str: each str corresponds to one TensorSpec from\n ``action_spec()``.\n \"\"\"\n return (\n \"4-D vector of [throttle, steer, brake, reverse], where \"\n \"throttle is in [-1.0, 1.0] (negative value is same as zero), \"\n \"steer is in [-1.0, 1.0], brake is in [-1.0, 1.0] (negative value \"\n \"is same as zero), and reverse is interpreted as a boolean value \"\n \"with values greater than 0.5 corrsponding to True.\")\n\n def reward_spec(self):\n \"\"\"Get the reward spec.\"\"\"\n return alf.TensorSpec([Player.REWARD_DIMENSION])\n\n def _get_goal(self):\n return _calculate_relative_position(self._actor.get_transform(),\n self._goal_location)\n\n def get_current_time_step(self, current_frame):\n \"\"\"Get the current time step for the player.\n\n Args:\n current_frame (int): current simulation frame no.\n Returns:\n TimeStep: all elements are ``np.ndarray`` or ``np.number``.\n \"\"\"\n obs = dict()\n for sensor_name, sensor in self._observation_sensors.items():\n obs[sensor_name] = sensor.get_current_observation(current_frame)\n obs['goal'] = self._get_goal()\n self._alf_world.update_actor_location(self._actor.id,\n self._actor.get_location())\n v = self._actor.get_velocity()\n obs['velocity'] = _calculate_relative_velocity(\n self._actor.get_transform(), _to_numpy_loc(v))\n self._current_distance = np.linalg.norm(obs['goal'])\n\n prev_loc = _to_numpy_loc(self._prev_location)\n curr_loc = _to_numpy_loc(self._actor.get_location())\n\n reward_vector = np.zeros(Player.REWARD_DIMENSION, np.float32)\n reward = 0.\n discount = 1.0\n info = OrderedDict(success=np.float32(0.0), collision=np.float32(0.0))\n\n # When the previous episode ends because of stucking at a collision with\n # another vehicle, it may get an additional collision event in the new frame\n # because the relocation of the car may happen after the simulation of the\n # moving. So we ignore the collision at the first step.\n self._collision = not np.all(\n obs['collision'] == 0) and not self._is_first_step\n if self._collision and not self._prev_collision:\n # We only report the first collision event among contiguous collision\n # events.\n info['collision'] = np.float32(1.0)\n logging.info(\"actor=%d frame=%d COLLISION\" % (self._actor.id,\n current_frame))\n self._collision_loc = curr_loc\n self._collision_frame = current_frame\n # We don't want the collision penalty to be too large if the player\n # cannot even get enough positive moving reward. 
So we cap the penalty\n # at ``max(0., self._episode_reward)``\n reward -= min(self._max_collision_penalty,\n max(0., self._episode_reward))\n reward_vector[Player.REWARD_COLLISION] = 1.\n\n if self._max_frame is None:\n step_type = ds.StepType.FIRST\n max_frames = math.ceil(\n self._route_length / self._min_speed / self._delta_seconds)\n self._max_frame = current_frame + max_frames\n elif (self._current_distance < self._success_distance_thresh\n and self._actor.get_velocity() == carla.Location(0., 0., 0.)):\n # TODO: include waypoint orientation as success critiria\n step_type = ds.StepType.LAST\n reward += self._success_reward\n reward_vector[Player.REWARD_SUCCESS] = 1.\n discount = 0.0\n info['success'] = np.float32(1.0)\n logging.info(\n \"actor=%d frame=%d SUCCESS\" % (self._actor.id, current_frame))\n elif current_frame >= self._max_frame:\n logging.info(\"actor=%d frame=%d FAILURE: out of time\" %\n (self._actor.id, current_frame))\n step_type = ds.StepType.LAST\n elif (self._collision_loc is not None\n and current_frame - self._collision_frame >\n self._max_stuck_at_collision_frames\n and np.linalg.norm(curr_loc - self._collision_loc) <\n self._stuck_at_collision_distance):\n logging.info(\"actor=%d frame=%d FAILURE: stuck at collision\" %\n (self._actor.id, current_frame))\n step_type = ds.StepType.LAST\n else:\n step_type = ds.StepType.MID\n\n if self._sparse_reward:\n current_index = self._navigation.get_next_waypoint_index()\n if step_type == ds.StepType.LAST and info['success'] == 1.0:\n # Since the episode is finished, we need to incorporate the final\n # progress towards the goal as reward to encourage stopping near the goal.\n distance_reward = (\n np.linalg.norm(self._intermediate_start -\n self._goal_location) -\n np.linalg.norm(curr_loc - self._goal_location))\n elif self._intermediate_goal_index < current_index:\n # This means that the car has passed the intermediate goal.\n # And we give it a reward which is equal to the distance it\n # travels.\n intermediate_goal = self._navigation.get_waypoint(\n self._intermediate_goal_index)\n distance_reward = np.linalg.norm(intermediate_goal -\n self._intermediate_start)\n self._intermediate_start = intermediate_goal\n self._intermediate_goal_index = min(\n self._intermediate_goal_index +\n self._sparse_reward_index_interval,\n self._navigation.num_waypoints - 1)\n else:\n goal0 = obs['navigation'][2] # This is about 10m ahead\n distance_reward = (np.linalg.norm(prev_loc - goal0) -\n np.linalg.norm(curr_loc - goal0))\n\n reward_vector[Player.REWARD_DISTANCE] = distance_reward\n if not self._allow_negative_distance_reward:\n distance_reward += self._unrecorded_distance_reward\n if distance_reward < 0:\n self._unrecorded_distance_reward = distance_reward\n distance_reward = 0\n else:\n self._unrecorded_distance_reward = 0\n reward += distance_reward\n\n obs['navigation'] = _calculate_relative_position(\n self._actor.get_transform(), obs['navigation'])\n\n self._done = step_type == ds.StepType.LAST\n self._episode_reward += reward\n\n reward_vector[Player.REWARD_OVERALL] = reward\n\n self._current_time_step = ds.TimeStep(\n step_type=step_type,\n reward=reward_vector,\n discount=np.float32(discount),\n observation=obs,\n prev_action=self._prev_action,\n env_info=info)\n return self._current_time_step\n\n def act(self, action):\n \"\"\"Generate the carla command for taking the given action.\n\n Use ``carla.Client.apply_batch_sync()`` to actually destroy the sensor.\n\n Args:\n action (nested np.ndarray):\n Returns:\n 
list[carla.command]:\n \"\"\"\n self._prev_collision = self._collision\n self._prev_location = self._actor.get_location()\n self._is_first_step = False\n if self._done:\n return self.reset()\n self._control.throttle = max(float(action[0]), 0.0)\n self._control.steer = float(action[1])\n self._control.brake = max(float(action[2]), 0.0)\n self._control.reverse = bool(action[3] > 0.5)\n self._prev_action = action\n\n return [carla.command.ApplyVehicleControl(self._actor, self._control)]\n\n def render(self, mode):\n \"\"\"Render the simulation.\n\n Args:\n mode (str): one of ['rgb_array', 'human']\n Returns:\n one of the following:\n - None: if mode is 'human'\n - np.ndarray: the image of shape [height, width, channeles] if\n mode is 'rgb_array'\n \"\"\"\n import pygame\n if self._surface is None:\n pygame.init()\n pygame.font.init()\n self._clock = pygame.time.Clock()\n if self._camera_sensor:\n height, width = self._camera_sensor.observation_spec(\n ).shape[1:3]\n height = max(height, MINIMUM_RENDER_HEIGHT)\n width = max(width, MINIMUM_RENDER_WIDTH)\n else:\n height = MINIMUM_RENDER_HEIGHT\n width = MINIMUM_RENDER_WIDTH\n if mode == 'human':\n self._surface = pygame.display.set_mode(\n (width, height), pygame.HWSURFACE | pygame.DOUBLEBUF)\n else:\n self._surface = pygame.Surface((width, height))\n\n if mode == 'human':\n self._clock.tick_busy_loop(1000)\n\n if self._camera_sensor:\n self._camera_sensor.render(self._surface)\n obs = self._current_time_step.observation\n np_precision = np.get_printoptions()['precision']\n np.set_printoptions(precision=1)\n info_text = [\n 'FPS: %6.2f' % self._clock.get_fps(),\n 'GPS: (%7.4f, %8.4f, %5.2f)' % tuple(obs['gnss'].tolist()),\n 'Goal: (%7.1f, %8.1f, %5.1f)' % tuple(obs['goal'].tolist()),\n 'Ahead: (%7.1f, %8.1f, %5.1f)' % tuple(\n obs['navigation'][2].tolist()),\n 'Distance: %7.2f' % np.linalg.norm(obs['goal']),\n 'Velocity: (%4.1f, %4.1f, %4.1f) km/h' % tuple(\n (3.6 * obs['velocity']).tolist()),\n 'Acceleration: (%4.1f, %4.1f, %4.1f)' % tuple(\n obs['imu'][0:3].tolist()),\n 'Compass: %5.1f' % math.degrees(float(obs['imu'][6])),\n 'Throttle: %4.2f' % self._control.throttle,\n 'Brake: %4.2f' % self._control.brake,\n 'Steer: %4.2f' % self._control.steer,\n 'Reverse: %4s' % self._control.reverse,\n 'Reward: (%s)' % self._current_time_step.reward,\n ]\n np.set_printoptions(precision=np_precision)\n self._draw_text(info_text)\n\n if mode == 'human':\n pygame.display.flip()\n elif mode == 'rgb_array':\n # (x, y, c) => (y, x, c)\n return np.transpose(\n pygame.surfarray.array3d(self._surface), (1, 0, 2))\n else:\n raise ValueError(\"Unsupported render mode: %s\" % mode)\n\n def _draw_text(self, texts):\n import os\n import pygame\n if self._font is None:\n font_name = 'courier' if os.name == 'nt' else 'mono'\n fonts = [x for x in pygame.font.get_fonts() if font_name in x]\n default_font = 'ubuntumono'\n mono = default_font if default_font in fonts else fonts[0]\n mono = pygame.font.match_font(mono)\n self._font = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)\n info_surface = pygame.Surface((240, 240))\n info_surface.set_alpha(100)\n self._surface.blit(info_surface, (0, 0))\n v_offset = 4\n for item in texts:\n surface = self._font.render(item, True, (255, 255, 255))\n self._surface.blit(surface, (8, v_offset))\n v_offset += 18\n\n\ndef _exec(command):\n stream = os.popen(command)\n ret = stream.read()\n stream.close()\n return ret\n\n\ngin.constant('CarlaEnvironment.REWARD_DIMENSION', Player.REWARD_DIMENSION)\n\n\[email protected]\nclass 
CarlaServer(object):\n \"\"\"CarlaServer for doing the simulation.\"\"\"\n\n def __init__(self,\n rpc_port=2000,\n streaming_port=2001,\n docker_image=\"horizonrobotics/alf:0.0.3-carla\",\n quality_level=\"Low\",\n carla_root=\"/home/carla\",\n use_opengl=True):\n \"\"\"\n\n Args:\n rpc_port (int): port for RPC\n streaming_port (int): port for data streaming\n docker_image (str): If provided, will use the docker image to start\n the Carla server. Some valid images are \"carlasim/carla:0.9.9\"\n and \"horionrobotics/alf:0.0.3-carla\"\n quality_level (str): one of ['Low', 'Epic']. See the explanation at\n `<https://carla.readthedocs.io/en/latest/adv_rendering_options/#graphics-quality>`_\n carla_root (str): directorcy where CarlaUE4.sh is in. The default\n value is correct for using docker image. If not using docker\n image, make sure you provide the correct path. This is the directory\n where you unzipped the file you downloaded from\n `<https://github.com/carla-simulator/carla/releases/tag/0.9.9>`_.\n use_opengl (bool): the default graphics engine of Carla is Vulkan,\n which is supposed to be better than OpenGL. However, Vulkan is not\n always available. It may not be installed or the nvidia driver does\n not support vulkan.\n \"\"\"\n assert quality_level in ['Low', 'Epic'], \"Unknown quality level\"\n use_docker = (not alf.utils.common.is_inside_docker_container()\n and docker_image)\n opengl = \"-opengl\" if use_opengl else \"\"\n if use_docker:\n dev = os.environ.get('CUDA_VISIBLE_DEVICES')\n if not dev:\n dev = 'all'\n command = (\"docker run -d \"\n \"-p {rpc_port}:{rpc_port} \"\n \"-p {streaming_port}:{streaming_port} \"\n \"-u carla \"\n \"--rm --gpus device=\" + dev + \" \" + docker_image +\n \" {carla_root}/CarlaUE4.sh \"\n \"--carla-rpc-port={rpc_port} \"\n \"--carla-streaming-port={streaming_port} \"\n \"--quality-level={quality_level} {opengl}\")\n else:\n assert os.path.exists(carla_root + \"/CarlaUE4.sh\"), (\n \"%s/CarlaUE4.sh \"\n \"does not exist. 
Please provide correct value for `carla_root`\"\n % carla_root)\n # We do not use CarlaUE4.sh here in order to get the actual Carla\n # server processs so that we can kill it.\n command = (\n \"{carla_root}/CarlaUE4/Binaries/Linux/CarlaUE4-Linux-Shipping \"\n \"CarlaUE4 \" # perhaps most system does not have vulkan support, so we use opengl\n \"-carla-rpc-port={rpc_port} \"\n \"-carla-streaming-port={streaming_port} \"\n \"-quality-level={quality_level} {opengl}\")\n\n command = command.format(\n rpc_port=rpc_port,\n streaming_port=streaming_port,\n quality_level=quality_level,\n carla_root=carla_root,\n opengl=opengl)\n\n logging.info(\"Starting Carla server: %s\" % command)\n self._container_id = None\n self._process = None\n if use_docker:\n self._container_id = _exec(command)\n assert self._container_id, \"Fail to start container\"\n logging.info(\"Starting carla in container %s\" % self._container_id)\n else:\n new_env = os.environ.copy()\n new_env['SDL_VIDEODRIVER'] = 'offscreen'\n self._process = subprocess.Popen(\n command.split(),\n stdout=sys.stdout,\n stderr=sys.stderr,\n env=new_env)\n\n def stop(self):\n \"\"\"Stop the carla server.\"\"\"\n if self._container_id:\n command = \"docker kill %s\" % self._container_id\n logging.info(\"Stopping Carla server: %s\" % command)\n _exec(command)\n self._container_id = None\n if self._process:\n self._process.kill()\n self._process.communicate()\n self._process = None\n\n def __del__(self):\n self.stop()\n\n\[email protected]\nclass CarlaEnvironment(AlfEnvironment):\n \"\"\"Carla simulation environment.\n\n In order to use it, you need to either download a valid docker image or\n a Carla package.\n \"\"\"\n\n def __init__(self,\n batch_size,\n map_name,\n vehicle_filter='vehicle.*',\n walker_filter='walker.pedestrian.*',\n num_other_vehicles=0,\n num_walkers=0,\n percentage_walkers_running=0.1,\n percentage_walkers_crossing=0.1,\n global_distance_to_leading_vehicle=2.0,\n use_hybrid_physics_mode=True,\n safe=True,\n step_time=0.05):\n \"\"\"\n Args:\n batch_size (int): the number of learning vehicles.\n map_name (str): the name of the map (e.g. \"Town01\")\n vehicle_filter (str): the filter for getting vehicle blueprints.\n walker_filter (str): the filter for getting walker blueprints.\n num_other_vehicles (int): the number of autopilot vehicles\n num_walkers (int): the number of walkers\n global_distance_to_leading_vehicle (str): the autopiloted vehicles\n will try to keep such distance from other vehicles.\n percentage_walkers_running (float): percent of running walkers\n percentage_walkers_crossing (float): percent of walkers walking\n across the road.\n use_hybrid_physics_mode (bool): If true, the autopiloted vehicle will\n not use physics for simulation if it is far from other vehicles.\n safe (bool): avoid spawning vehicles prone to accidents.\n step_time (float): how many seconds does each step of simulation represents.\n \"\"\"\n super().__init__()\n\n with _get_unused_port(2000, n=2) as (rpc_port, streaming_port):\n self._server = CarlaServer(rpc_port, streaming_port)\n\n self._batch_size = batch_size\n self._num_other_vehicles = num_other_vehicles\n self._num_walkers = num_walkers\n self._percentage_walkers_running = percentage_walkers_running\n self._percentage_walkers_crossing = percentage_walkers_crossing\n\n self._world = None\n try:\n for i in range(20):\n try:\n logging.info(\n \"Waiting for server to start. 
Try %d\" % (i + 1))\n self._client = carla.Client(\"localhost\", rpc_port)\n self._world = self._client.load_world(map_name)\n break\n except RuntimeError:\n continue\n finally:\n if self._world is None:\n self._server.stop()\n assert self._world is not None, \"Fail to start server.\"\n\n logging.info(\"Server started.\")\n\n self._traffic_manager = None\n if self._num_other_vehicles + self._num_walkers > 0:\n with _get_unused_port(8000, n=1) as tm_port:\n self._traffic_manager = self._client.get_trafficmanager(\n tm_port)\n self._traffic_manager.set_hybrid_physics_mode(\n use_hybrid_physics_mode)\n self._traffic_manager.set_global_distance_to_leading_vehicle(\n global_distance_to_leading_vehicle)\n\n self._client.set_timeout(20)\n self._alf_world = World(self._world)\n self._safe = safe\n self._vehicle_filter = vehicle_filter\n self._walker_filter = walker_filter\n\n settings = self._world.get_settings()\n settings.synchronous_mode = True\n settings.fixed_delta_seconds = step_time\n\n self._world.apply_settings(settings)\n self._map_name = map_name\n\n self._spawn_vehicles()\n self._spawn_walkers()\n\n self._observation_spec = self._players[0].observation_spec()\n self._action_spec = self._players[0].action_spec()\n self._env_info_spec = self._players[0].info_spec()\n self._reward_spec = self._players[0].reward_spec()\n\n # metadata property is required by video recording\n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 1 / step_time\n }\n\n def _spawn_vehicles(self):\n blueprints = self._world.get_blueprint_library().filter(\n self._vehicle_filter)\n assert len(\n blueprints) > 0, \"Cannot find vehicle '%s'\" % self._vehicle_filter\n if self._safe:\n blueprints = [\n x for x in blueprints\n if int(x.get_attribute('number_of_wheels')) == 4\n ]\n blueprints = [x for x in blueprints if not x.id.endswith('isetta')]\n blueprints = [\n x for x in blueprints if not x.id.endswith('carlacola')\n ]\n blueprints = [\n x for x in blueprints if not x.id.endswith('cybertruck')\n ]\n blueprints = [x for x in blueprints if not x.id.endswith('t2')]\n assert len(\n blueprints\n ) > 0, \"Cannot find safe vehicle '%s'\" % self._vehicle_filter\n\n spawn_points = self._world.get_map().get_spawn_points()\n number_of_spawn_points = len(spawn_points)\n\n num_vehicles = self._batch_size + self._num_other_vehicles\n if num_vehicles <= number_of_spawn_points:\n random.shuffle(spawn_points)\n else:\n raise ValueError(\n \"requested %d vehicles, but could only find %d spawn points\" %\n (self._batch_size, number_of_spawn_points))\n\n commands = []\n for i, transform in enumerate(spawn_points[:num_vehicles]):\n blueprint = random.choice(blueprints)\n if blueprint.has_attribute('color'):\n color = random.choice(\n blueprint.get_attribute('color').recommended_values)\n blueprint.set_attribute('color', color)\n if blueprint.has_attribute('driver_id'):\n driver_id = random.choice(\n blueprint.get_attribute('driver_id').recommended_values)\n blueprint.set_attribute('driver_id', driver_id)\n if i < self._batch_size:\n blueprint.set_attribute('role_name', 'hero')\n else:\n blueprint.set_attribute('role_name', 'autopilot')\n command = carla.command.SpawnActor(blueprint, transform)\n if i >= self._batch_size:\n # managed by traffic manager\n command = command.then(\n carla.command.SetAutopilot(\n carla.command.FutureActor, True,\n self._traffic_manager.get_port()))\n commands.append(command)\n\n self._players = []\n self._other_vehicles = []\n responses = 
self._client.apply_batch_sync(commands, True)\n for i, response in enumerate(responses):\n if response.error:\n logging.error(response.error)\n continue\n vehicle = self._world.get_actor(response.actor_id)\n if i < self._batch_size:\n self._players.append(Player(vehicle, self._alf_world))\n else:\n self._other_vehicles.append(vehicle)\n self._alf_world.add_actor(vehicle)\n self._alf_world.update_actor_location(vehicle.id,\n spawn_points[i].location)\n\n assert len(self._players) + len(\n self._other_vehicles) == num_vehicles, (\n \"Fail to create %s vehicles\" % num_vehicles)\n\n def _spawn_walkers(self):\n walker_blueprints = self._world.get_blueprint_library().filter(\n self._walker_filter)\n\n # 1. take all the random locations to spawn\n spawn_points = []\n for _ in range(self._num_walkers):\n spawn_point = carla.Transform()\n loc = self._world.get_random_location_from_navigation()\n if loc != None:\n spawn_point.location = loc\n spawn_points.append(spawn_point)\n\n # 2. we spawn the walker object\n commands = []\n walker_speeds = []\n for spawn_point in spawn_points:\n walker_bp = random.choice(walker_blueprints)\n # set as not invincible\n if walker_bp.has_attribute('is_invincible'):\n walker_bp.set_attribute('is_invincible', 'false')\n # set the max speed\n if walker_bp.has_attribute('speed'):\n if (random.random() > self._percentage_walkers_running):\n # walking\n walker_speeds.append(\n walker_bp.get_attribute('speed').recommended_values[1])\n else:\n # running\n walker_speeds.append(\n walker_bp.get_attribute('speed').recommended_values[2])\n else:\n logging.info(\"Walker has no speed\")\n walker_speeds.append(0.0)\n commands.append(carla.command.SpawnActor(walker_bp, spawn_point))\n responses = self._client.apply_batch_sync(commands, True)\n walker_speeds2 = []\n self._walkers = []\n for response, walker_speed, spawn_point in zip(\n responses, walker_speeds, spawn_points):\n if response.error:\n logging.error(\n \"%s: %s\" % (response.error, spawn_point.location))\n continue\n walker = self._world.get_actor(response.actor_id)\n self._walkers.append({\"walker\": walker})\n walker_speeds2.append(walker_speed)\n walker_speeds = walker_speeds2\n\n # 3. we spawn the walker controller\n commands = []\n walker_controller_bp = self._world.get_blueprint_library().find(\n 'controller.ai.walker')\n for walker in self._walkers:\n commands.append(\n carla.command.SpawnActor(walker_controller_bp,\n carla.Transform(),\n walker[\"walker\"].id))\n responses = self._client.apply_batch_sync(commands, True)\n for response, walker in zip(responses, self._walkers):\n if response.error:\n logging.error(response.error)\n continue\n walker[\"controller\"] = self._world.get_actor(response.actor_id)\n\n # wait for a tick to ensure client receives the last transform of the walkers we have just created\n self._world.tick()\n\n # 5. 
initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...])\n # set how many pedestrians can cross the road\n self._world.set_pedestrians_cross_factor(\n self._percentage_walkers_crossing)\n for walker, walker_speed in zip(self._walkers, walker_speeds):\n # start walker\n walker['controller'].start()\n # set walk to random point\n location = self._world.get_random_location_from_navigation()\n walker['controller'].go_to_location(location)\n # max speed\n walker['controller'].set_max_speed(float(walker_speed))\n self._alf_world.add_actor(walker['walker'])\n self._alf_world.update_actor_location(walker['walker'].id,\n location)\n\n def _clear(self):\n if self._world is None:\n return\n if self._players:\n commands = []\n for player in self._players:\n commands.extend(player.destroy())\n for response in self._client.apply_batch_sync(commands, True):\n if response.error:\n logging.error(response.error)\n self._players.clear()\n commands = []\n for vehicle in self._other_vehicles:\n commands.append(carla.command.DestroyActor(vehicle))\n for walker in self._walkers:\n walker['controller'].stop()\n commands.append(carla.command.DestroyActor(walker['controller']))\n commands.append(carla.command.DestroyActor(walker['walker']))\n\n if commands:\n for response in self._client.apply_batch_sync(commands, True):\n if response.error:\n logging.error(response.error)\n self._other_vehicles.clear()\n self._walkers.clear()\n\n @property\n def batched(self):\n return True\n\n @property\n def batch_size(self):\n return self._batch_size\n\n def env_info_spec(self):\n return self._env_info_spec\n\n def observation_spec(self):\n return self._observation_spec\n\n def observation_desc(self):\n return self._players[0].observation_desc()\n\n def action_spec(self):\n return self._action_spec\n\n def action_desc(self):\n return self._players[0].action_desc()\n\n def reward_spec(self):\n return self._reward_spec\n\n def close(self):\n self._clear()\n self._server.stop()\n\n def __del__(self):\n self.close()\n\n @property\n def players(self):\n \"\"\"Get all the players in the environment.\n\n Returns:\n list[Player]:\n \"\"\"\n return self._players\n\n def render(self, mode):\n return self._players[0].render(mode)\n\n def _step(self, action):\n action = alf.nest.map_structure(lambda x: x.cpu().numpy(), action)\n commands = []\n for player, act in zip(self._players, action):\n commands.extend(player.act(act))\n for response in self._client.apply_batch_sync(commands):\n if response.error:\n logging.error(response.error)\n self._current_frame = self._world.tick()\n for vehicle in self._other_vehicles:\n self._alf_world.update_actor_location(vehicle.id,\n vehicle.get_location())\n for walker in self._walkers:\n actor = walker['walker']\n self._alf_world.update_actor_location(actor.id,\n actor.get_location())\n\n return self._get_current_time_step()\n\n def _get_current_time_step(self):\n time_step = [\n player.get_current_time_step(self._current_frame)\n for player in self._players\n ]\n time_step = alf.nest.map_structure(lambda *a: np.stack(a), *time_step)\n time_step = alf.nest.map_structure(torch.as_tensor, time_step)\n\n common.check_numerics(time_step)\n\n return time_step._replace(env_id=torch.arange(self._batch_size))\n\n def _reset(self):\n commands = []\n for player in self._players:\n commands.extend(player.reset())\n for response in self._client.apply_batch_sync(commands):\n if response.error:\n logging.error(response.error)\n self._current_frame = self._world.tick()\n 
return self._get_current_time_step()\n\n\[email protected](whitelist=['wrappers'])\ndef load(map_name, batch_size, wrappers=[]):\n \"\"\"Load CarlaEnvironment\n\n Args:\n map_name (str): name of the map. Currently available maps are:\n 'Town01, Town02', 'Town03', 'Town04', 'Town05', 'Town06', 'Town07',\n and 'Town10HD'\n batch_size (int): the number of vehicles in the simulation.\n wrappers (list[AlfEnvironmentBaseWrapper]): environment wrappers\n Returns:\n CarlaEnvironment\n \"\"\"\n env = CarlaEnvironment(batch_size, map_name)\n for wrapper in wrappers:\n env = wrapper(env)\n return env\n\n\nload.batched = True\n", "# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import deque\nimport itertools\nfrom copy import copy\nimport numpy as np\nfrom PIL import Image\nimport gym\nfrom gym import spaces\n\n# See https://github.com/openai/large-scale-curiosity/blob/ \\\n# 0c3d179fd61ee46233199d0891c40fbe7964d3aa/wrappers.py#L155-L238\n\n\nclass MarioXReward(gym.Wrapper):\n \"\"\"\n Wrap mario environment and use X-axis coordinate increment as reward\n\n ```\n max_x = 0 if initial or upgrade_to_new_level\n current_x = xscrollHi * 256 + xscrollLo\n reward = current_x - max_x if current_x > max_x else 0\n max_x = current_x if current_x > max_x else max_x\n ```\n \"\"\"\n\n def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.current_level = [0, 0]\n self.visited_levels = set()\n self.visited_levels.add(tuple(self.current_level))\n self.current_max_x = 0.\n\n def reset(self):\n ob = self.env.reset()\n self.current_level = [0, 0]\n self.visited_levels = set()\n self.visited_levels.add(tuple(self.current_level))\n self.current_max_x = 0.\n return ob\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n levellow, levelhigh, xscrollHi, xscrollLo = \\\n info[\"levelLo\"], info[\"levelHi\"], \\\n info[\"xscrollHi\"], info[\"xscrollLo\"]\n currentx = xscrollHi * 256 + xscrollLo\n new_level = [levellow, levelhigh]\n if new_level != self.current_level:\n self.current_level = new_level\n self.current_max_x = 0.\n reward = 0.\n self.visited_levels.add(tuple(self.current_level))\n else:\n if currentx > self.current_max_x:\n delta = currentx - self.current_max_x\n self.current_max_x = currentx\n reward = delta\n else:\n reward = 0.\n if done:\n info[\"levels\"] = copy(self.visited_levels)\n info[\"retro_episode\"] = dict(levels=copy(self.visited_levels))\n\n return ob, reward, done, info\n\n\nclass LimitedDiscreteActions(gym.ActionWrapper):\n \"\"\"\n Wrap mario environment and make it use discrete actions.\n Map available button combinations to discrete actions\n eg:\n 0 -> None\n 1 -> UP\n 2 -> DOWN\n ...\n k -> A\n ...\n m -> A + LEFT\n ...\n n -> B + UP\n ...\n \"\"\"\n\n BUTTONS = {\"A\", \"B\"}\n SHOULDERS = {\"L\", \"R\"}\n\n def __init__(self, env, all_buttons):\n gym.ActionWrapper.__init__(self, env)\n # 'B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A'\n self._num_buttons = 
len(all_buttons)\n button_keys = {\n i\n for i, b in enumerate(all_buttons) if b in self.BUTTONS\n }\n buttons = [(), *zip(button_keys),\n *itertools.combinations(button_keys, 2)]\n # 'UP', 'DOWN', 'LEFT', 'RIGHT'\n arrows = [(), (4, ), (5, ), (6, ), (7, )]\n acts = []\n acts += arrows\n acts += buttons[1:]\n acts += [a + b for a in arrows[-2:] for b in buttons[1:]]\n self._actions = acts\n self.action_space = gym.spaces.Discrete(len(self._actions))\n\n def action(self, a):\n mask = np.zeros(self._num_buttons)\n for i in self._actions[a]:\n mask[i] = 1\n return mask\n\n\nclass ProcessFrame84(gym.ObservationWrapper):\n \"\"\"\n Resize frame from original resolution to 84x84 or\n resize to 84x110 and then crop to 84x84\n \"\"\"\n\n def __init__(self, env, crop=True):\n self.crop = crop\n super(ProcessFrame84, self).__init__(env)\n self.observation_space = gym.spaces.Box(\n low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)\n\n def observation(self, obs):\n return ProcessFrame84.process(obs, crop=self.crop)\n\n @staticmethod\n def process(frame, crop=True):\n if frame.size == 210 * 160 * 3:\n img = np.reshape(frame, [210, 160, 3]).astype(np.float32)\n elif frame.size == 250 * 160 * 3:\n img = np.reshape(frame, [250, 160, 3]).astype(np.float32)\n elif frame.size == 224 * 240 * 3: # mario resolution\n img = np.reshape(frame, [224, 240, 3]).astype(np.float32)\n else:\n assert False, \"Unknown resolution.\" + str(frame.size)\n img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114\n size = (84, 110 if crop else 84)\n resized_screen = np.array(\n Image.fromarray(img).resize(size, resample=Image.BILINEAR),\n dtype=np.uint8)\n x_t = resized_screen[18:102, :] if crop else resized_screen\n x_t = np.reshape(x_t, [84, 84, 1])\n return x_t.astype(np.uint8)\n\n\nclass FrameFormat(gym.Wrapper):\n \"\"\"\n Format frame to specified data_format\n\n Args:\n data_format: Data format for frame\n `channels_first` for CHW and `channels_last` for HWC\n \"\"\"\n\n def __init__(self, env, data_format='channels_last'):\n gym.Wrapper.__init__(self, env)\n data_format = data_format.lower()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('The `data_format` argument must be one of '\n '\"channels_first\", \"channels_last\". Received: ' +\n str(data_format))\n self._transpose = False\n obs_shape = env.observation_space.shape\n if data_format == 'channels_first':\n self._transpose = True\n obs_shape = (obs_shape[-1], ) + (obs_shape[:-1])\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=obs_shape,\n dtype=env.observation_space.dtype)\n\n def reset(self):\n ob = self.env.reset()\n return self._get_ob(ob)\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n ob = self._get_ob(ob)\n return ob, reward, done, info\n\n def _get_ob(self, ob):\n import numpy as np\n if self._transpose:\n return np.transpose(ob, (2, 0, 1))\n return ob\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.sin", "numpy.matmul", "numpy.zeros", "torch.arange", "numpy.set_printoptions", "numpy.get_printoptions", "numpy.float32", "numpy.stack", "numpy.sqrt", "numpy.cos", "numpy.all" ], [ "numpy.transpose", "numpy.reshape", "numpy.zeros" ] ]
rougier/JCGT-2014a
[ "78793d05a145af79d9cacf87a6e1ffaaea501394" ]
[ "demo-continuous.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (C) 2013 Nicolas P. Rougier. All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# \n# THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\n# EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n# The views and conclusions contained in the software and documentation are\n# those of the authors and should not be interpreted as representing official\n# policies, either expressed or implied, of Nicolas P. Rougier.\n# -----------------------------------------------------------------------------\nimport numpy as np\nimport OpenGL.GL as gl\nfrom transforms import ortho\n\n# -------------------------------------\ndef on_display():\n gl.glClearColor(1,1,1,1)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n collection.draw(uniforms= {'u_projection': u_projection,\n 'u_model' : u_model,\n 'u_view' : u_view})\n glut.glutSwapBuffers()\n\n# -------------------------------------\ndef on_reshape(width, height):\n gl.glViewport(0, 0, width, height)\n u_projection[...] 
= ortho(0,width,0,height,-1,1)\n collection.scale = min(width, height)\n\n# -------------------------------------\ndef on_keyboard(key, x, y):\n if key == '\\033': sys.exit()\n\n# -------------------------------------\ndef on_special( key, x, y ):\n if key == glut.GLUT_KEY_LEFT:\n collection.dash_phase += 0.05\n elif key == glut.GLUT_KEY_RIGHT:\n collection.dash_phase -= 0.05\n glut.glutPostRedisplay()\n\n\n# -------------------------------------\nif __name__ == '__main__':\n import sys\n import OpenGL.GLUT as glut\n\n from curves import curve3_bezier, curve4_bezier\n from dash_lines_2D import DashLines\n\n glut.glutInit(sys.argv)\n # HiDPI support for retina display\n # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/\n if sys.platform == 'darwin':\n import ctypes\n from OpenGL import platform\n try:\n glutInitDisplayString = platform.createBaseFunction( \n 'glutInitDisplayString', dll=platform.GLUT, resultType=None, \n argTypes=[ctypes.c_char_p],\n doc='glutInitDisplayString( ) -> None', \n argNames=() )\n text = ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\")\n glutInitDisplayString(text)\n except:\n pass\n\n glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH)\n glut.glutInitWindowSize(1000, 1000)\n glut.glutCreateWindow(\"Dashed & antialiased bezier curve [Arrow keys change offset]\")\n glut.glutDisplayFunc(on_display)\n glut.glutReshapeFunc(on_reshape)\n glut.glutKeyboardFunc(on_keyboard)\n glut.glutSpecialFunc(on_special)\n\n # Some init\n gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA )\n gl.glDisable( gl.GL_DEPTH_TEST )\n gl.glEnable( gl.GL_BLEND )\n gl.glClearColor(1.0,1.0,1.0,1.0)\n u_projection = np.eye(4).astype( np.float32 )\n u_view = np.eye(4).astype( np.float32 )\n u_model = np.eye(4).astype( np.float32 )\n\n collection = DashLines()\n\n # ---------------------------------\n points = np.array([[.1, .6], [.5, 1.], [.9, .6]])\n vertices = curve3_bezier(*points)\n\n closed = False\n collection.append(vertices, color=(0,0,0,1), linewidth=104,\n dash_pattern = 'solid', linecaps=('>','<'), closed=closed)\n collection.append(vertices, color=(1,1,1,1), linewidth=102,\n dash_pattern = 'solid', linecaps=('>','<'), closed=closed)\n collection.append(vertices, color=(0.75,0.75,1.00,1.00), linewidth=100,\n dash_pattern = 'dashed', dash_caps=('>','<'),\n linecaps=('>','<'), closed=closed)\n\n\n # ---------------------------------\n vertices = curve3_bezier(*(points + [0, -0.4]))\n collection.append(vertices, color=(0,0,0,1), linewidth=104,\n dash_pattern = 'solid', linecaps=('=','='), closed=closed)\n collection.append(vertices, color=(1,1,1,1), linewidth=102,\n dash_pattern = 'solid', linecaps=('=','='), closed=closed)\n collection.append( vertices, color=(0.75,0.75,1.00,1.0),\n linewidth=100, linecaps = ('|','|'), closed=closed,\n dash_pattern = 'custom', dash_caps=('|','|') )\n\n # ---------------------------------\n vertices = curve3_bezier(*(points + [0, -0.2]))\n collection.append(vertices, color=(0,0,0,1), linewidth=104,\n dash_pattern = 'solid', linecaps=('o','o'), closed=closed)\n collection.append(vertices, color=(1,1,1,1), linewidth=102,\n dash_pattern = 'solid', linecaps=('o','o'), closed=closed)\n collection.append( vertices, color=(0.75,0.75,1.00,1.0),\n linewidth=100, linecaps = ('o','o'), closed=closed,\n dash_pattern = 'densely dotted', dash_caps=('o','o') )\n\n\n glut.glutMainLoop()\n" ]
[ [ "numpy.array", "numpy.eye" ] ]
rhambach/TEMareels
[ "92a907f483baeb919dd485895c56454f0b552c76" ]
[ "tools/remove_stripes.py" ]
[ "\"\"\"\n IMPLEMENTATION:\n - crude method for removing periodic noise in images recorded \n on Tietz CMOS slave camera in wq-mode\n - integrates over several lines (e.g. 10x4096) of noise and \n substracts signal from each line in sector\n \n Copyright (c) 2013, pwachsmuth, rhambach\n This file is part of the TEMareels package and released\n under the MIT-Licence. See LICENCE file for details.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pylab as plt\n\ndef remove_stripes(image, intwidth=10, intstart=[0,1025,2900,3800], \n sector_width=1024, mask = None, verbosity = 0):\n \n image = np.asarray(image)\n old_image = image.copy(); \n \n offset = 0;\n for j in range(0,4):\n ref_line = old_image[:,intstart[j]:intstart[j]+intwidth].sum(axis=1)*(1./(intwidth));\n #ref_line[ref_line > thresh] = 0;\n imax = ref_line.argmax();\n if mask is not None:\n ref_line[mask] = 0;\n for i in range(offset,offset+sector_width):\n image[:,i] = image[:,i]-ref_line;\n offset += sector_width;\n #print offset \n image[:,0:5]= image[:,-5:] = 0; \n if verbosity > 0:\n plt.title(\"Remove Stripes: difference between old and new image\");\n plt.imshow(image - old_image, aspect='auto')\n plt.show();\n return image;\n\n \n # -- main ----------------------------------------\nif __name__ == '__main__':\n import TEMareels.tools.tifffile as tiff\n from TEMareels.tools import tvips\n\n\n image_file = '../tests/wqmap.tif';\n image = tiff.imread(image_file).astype(float);\n binning = 8;\n intstart= np.array([0,1025,2900,3800])/binning;\n \n img = remove_stripes(image, intwidth=100/binning, \n intstart=intstart, sector_width=1024/binning, verbosity=1);\n \n #outfile = \"script_test_.tif\";\n #tvips.write_tiff(img, outfile);\n \n \n \n" ]
[ [ "numpy.array", "numpy.asarray", "matplotlib.pylab.show", "matplotlib.pylab.title", "matplotlib.pylab.imshow" ] ]
pyronear/pyro-dataset
[ "b6445f6051058f20f2fc821040ec3705dc60464c" ]
[ "test/test_datasets.py" ]
[ "# Copyright (C) 2021, Pyronear contributors.\n\n# This program is licensed under the GNU Affero General Public License version 3.\n# See LICENSE or go to <https://www.gnu.org/licenses/agpl-3.0.txt> for full license details.\n\nimport unittest\nimport tempfile\nfrom pathlib import Path\nimport json\nfrom PIL.Image import Image\nimport pandas as pd\nimport random\nimport requests\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import transforms\nfrom torchvision.datasets import VisionDataset\n\nfrom pyrodataset.wildfire import WildFireDataset, WildFireSplitter, computeSubSet\n\n\ndef generate_wildfire_dataset_fixture():\n random.seed(42)\n df = pd.DataFrame(columns=['imgFile', 'fire_id', 'fire'])\n for i in range(974):\n df = df.append({'imgFile': str(i).zfill(4) + '.jpg', 'fire_id': float(random.randint(1, 100)),\n 'fire': float(random.randint(0, 1))}, ignore_index=True)\n\n return df\n\n\ndef generate_wildfire_subsampler_dataset_fixture():\n df = pd.DataFrame(columns=['exploitable', 'fire', 'sequence', 'clf_confidence',\n 'loc_confidence', 'x', 'y', 't', 'stateStart',\n 'stateEnd', 'imgFile', 'fire_id', 'fBase'])\n for b in range(10):\n x = random.uniform(200, 500)\n y = random.uniform(200, 500)\n t = random.uniform(0, 100)\n start = random.randint(0, 200)\n end = random.randint(start + 11, 400)\n base = str(b) + '.mp4'\n imgsNb = random.sample(range(start, end), 10)\n imgsNb.sort()\n imgs = [str(b) + '_frame' + str(i) + '.png' for i in imgsNb]\n fire_id = float(random.randint(1, 100))\n fire = float(random.randint(0, 1))\n for i in range(10):\n df = df.append({'exploitable': True, 'fire': fire, 'sequence': 0,\n 'clf_confidence': 0, 'loc_confidence': 0, 'x': x, 'y': y, 't': t, 'stateStart': start,\n 'stateEnd': end, 'imgFile': imgs[i], 'fire_id': fire_id,\n 'fBase': base}, ignore_index=True)\n\n return df\n\n\ndef get_wildfire_image():\n\n #download image\n url = 'https://media.springernature.com/w580h326/nature-cms/uploads/collections/' \\\n 'Wildfire-and-ecosystems-Hero-d62e7fbbf36ce6915d4e3efef069ee0e.jpg'\n response = requests.get(url)\n # save image\n file = open(\"test//0003.jpg\", \"wb\")\n file.write(response.content)\n file.close()\n\n\nclass WildFireDatasetTester(unittest.TestCase):\n\n def setUp(self):\n self.path_to_frames = Path(__file__).parent\n self.path_to_frames_str = str(self.path_to_frames)\n self.wildfire_path = Path(__file__).parent / 'wildfire_dataset.csv'\n self.wildfire_df = generate_wildfire_dataset_fixture()\n self.wildfire_df.to_csv(self.wildfire_path)\n get_wildfire_image()\n\n def test_wildfire_correctly_init_from_path(self):\n\n for path_to_frames in [self.path_to_frames, self.path_to_frames_str]:\n wildfire = WildFireDataset(\n metadata=self.wildfire_path,\n path_to_frames=path_to_frames\n )\n\n self.assertEqual(len(wildfire), 974)\n self.assertEqual(len(wildfire[3]), 2)\n\n def test_wildfire_correctly_init_from_dataframe(self):\n for path_to_frames in [self.path_to_frames, self.path_to_frames_str]:\n wildfire = WildFireDataset(\n metadata=self.wildfire_df,\n path_to_frames=path_to_frames\n )\n\n self.assertEqual(len(wildfire), 974)\n self.assertEqual(len(wildfire[3]), 2)\n\n # try to get one image of wildfire (item 3 is authorized image fixture)\n observation_3, metadata_3 = wildfire[3]\n self.assertIsInstance(observation_3, Image) # image correctly loaded ?\n self.assertEqual(observation_3.size, (580, 326))\n # metadata correctly loaded ?\n self.assertTrue(torch.equal(metadata_3, 
torch.tensor([self.wildfire_df.loc[3]['fire']])))\n\n def test_wildfire_correctly_init_with_multiple_targets(self):\n wildfire = WildFireDataset(\n metadata=self.wildfire_df,\n path_to_frames=self.path_to_frames,\n transform=transforms.ToTensor(),\n target_names=['fire', 'fire_id']\n )\n\n self.assertEqual(len(wildfire), 974)\n\n # try to get one image of wildfire (item 3 is authorized image fixture)\n observation_3, metadata_3 = wildfire[3]\n self.assertIsInstance(observation_3, torch.Tensor) # image correctly loaded ?\n self.assertEqual(observation_3.size(), torch.Size([3, 326, 580]))\n self.assertTrue(torch.equal(metadata_3, torch.tensor([self.wildfire_df.loc[3]['fire'],\n self.wildfire_df.loc[3]['fire_id']]))) # metadata correctly loaded ?\n\n def test_invalid_csv_path_raises_exception(self):\n with self.assertRaises(ValueError):\n WildFireDataset(\n metadata='bad_path.csv',\n path_to_frames=self.path_to_frames\n )\n\n def test_wildfire_correctly_init_with_transform(self):\n wildfire = WildFireDataset(\n metadata=self.wildfire_path,\n path_to_frames=self.path_to_frames,\n transform=transforms.Compose([transforms.Resize((100, 66)), transforms.ToTensor()])\n )\n\n observation_3, _ = wildfire[3]\n self.assertEqual(observation_3.size(), torch.Size((3, 100, 66)))\n\n def test_dataloader_can_be_init_with_wildfire(self):\n wildfire = WildFireDataset(metadata=self.wildfire_path, path_to_frames=self.path_to_frames)\n DataLoader(wildfire, batch_size=64)\n\n\nclass WildFireSubSamplerTester(unittest.TestCase):\n\n def setUp(self):\n self.path_to_frames = Path(__file__).parent\n self.wildfire_path = Path(__file__).parent / 'wildfire_dataset.csv'\n self.wildfire_df = generate_wildfire_subsampler_dataset_fixture()\n self.wildfire_df.to_csv(self.wildfire_path)\n\n def test_good_size_after_subsamping(self):\n self.assertEqual(len(self.wildfire_df), 100)\n metadataSS = computeSubSet(self.wildfire_df, 2)\n\n self.assertEqual(len(metadataSS), 20)\n\n def test_metadata_changes_each_time(self):\n metadataSS_1 = computeSubSet(self.wildfire_df, 2, seed=1)\n metadataSS_2 = computeSubSet(self.wildfire_df, 2, seed=2)\n\n self.assertEqual(len(metadataSS_1), 20)\n self.assertEqual(len(metadataSS_2), 20)\n self.assertFalse(metadataSS_1['imgFile'].values.tolist() == metadataSS_2['imgFile'].values.tolist())\n\n def test_metadata_does_not_changes_with_same_seed(self):\n metadataSS_1 = computeSubSet(self.wildfire_df, 2, seed=1)\n metadataSS_2 = computeSubSet(self.wildfire_df, 2, seed=1)\n\n self.assertEqual(len(metadataSS_1), 20)\n self.assertEqual(len(metadataSS_2), 20)\n self.assertTrue(metadataSS_1['imgFile'].values.tolist() == metadataSS_2['imgFile'].values.tolist())\n\n def test_increase_not_fire_semples(self):\n metadataSS = computeSubSet(self.wildfire_path, 2, 1)\n\n self.assertGreater(len(metadataSS), 20)\n\n def test_invalid_csv_path_raises_exception(self):\n with self.assertRaises(ValueError):\n computeSubSet(\n metadata='bad_path.csv',\n frame_per_seq=2\n )\n\n\nclass WildFireDatasetSplitter(unittest.TestCase):\n\n def setUp(self):\n self.path_to_frames = Path(__file__).parent\n\n self.wildfire_df = generate_wildfire_dataset_fixture()\n\n self.wildfire = WildFireDataset(metadata=self.wildfire_df, path_to_frames=self.path_to_frames)\n\n def test_consistent_ratios_good_init(self):\n ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}\n splitter = WildFireSplitter(ratios)\n self.assertEqual(ratios, splitter.ratios)\n\n def test_inconsistent_ratios_raise_exception(self):\n ratios = {'train': 0.9, 'val': 0.2, 
'test': 0.1} # sum > 1\n with self.assertRaises(ValueError):\n WildFireSplitter(ratios)\n\n def test_splitting_with_test_to_zero(self):\n ratios = {'train': 0.8, 'val': 0.2, 'test': 0}\n\n splitter = WildFireSplitter(ratios, seed=42)\n splitter.fit(self.wildfire)\n\n for (set_, ratio_) in splitter.ratios_.items():\n self.assertAlmostEqual(ratio_, ratios[set_], places=1)\n\n def test_splitting_gives_good_splits_size(self):\n n_samples_expected = {'train': 688, 'val': 147, 'test': 139}\n ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}\n\n splitter = WildFireSplitter(ratios, seed=42)\n splitter.fit(self.wildfire)\n\n self.assertEqual(splitter.n_samples_, n_samples_expected)\n for (set_, ratio_) in splitter.ratios_.items():\n self.assertAlmostEqual(ratio_, ratios[set_], places=1)\n\n def test_splitting_working_with_transforms(self):\n ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}\n transforms_expected = {'train': transforms.RandomCrop(10), 'val': None, 'test': None}\n\n splitter = WildFireSplitter(ratios, transforms=transforms_expected)\n splitter.fit(self.wildfire)\n\n for (set_, transform_expected) in transforms_expected.items():\n self.assertIs(getattr(splitter, set_).transform, transform_expected)\n\n def test_splitting_with_unavailable_algorithm_raise_exception(self):\n ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}\n\n splitter = WildFireSplitter(ratios, algorithm='wtf')\n with self.assertRaises(ValueError):\n splitter.fit(self.wildfire)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.Size", "pandas.DataFrame", "torch.tensor", "torch.utils.data.DataLoader" ] ]
alesanmed/as-route
[ "fc7fcb65496188f7c7e12626e2169f5315e4e3d1" ]
[ "heuristic/Constructive.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport heuristic.utils as utils\nimport random\n\nfrom heuristic.Graph import TSP_Graph\nfrom heuristic.Solution import Solution\n\ndef random_solution(graph, customers_list):\n if not isinstance(graph, TSP_Graph):\n utils.raise_value_error(graph, TSP_Graph, type(graph))\n \n if not isinstance(customers_list, list):\n utils.raise_value_error(\"customers_list\", list, type(customers_list))\n \n customers = np.empty((len(customers_list),), \n dtype=[('id', 'i4'), ('ws', 'i8'), ('we', 'i8'), ('t', 'i8')])\n \n for i, customer in enumerate(customers_list):\n depot_pos = graph.get_customer_index(0) \n c_pos = customer.get_row()\n customers[i] = (customer.get_row(), \n customer.get_window_start(),\n customer.get_window_end(),\n graph.get_value(depot_pos, c_pos))\n \n # Almacen siempre el primero, su ventana empieza en 0 y el tiempo hasta si\n # mismo es 0\n \n customers = customers[np.argsort(customers, order=('we', 't'))]\n \n depot = np.where(customers['id'] == 0)[0][0]\n \n customers = np.concatenate(([customers[depot]],\n customers[0:depot],\n customers[depot+1:]))\n ###############\n ## Provisional, se quitara la inversion del orden, es para forzar una solucion\n ## inicial invalida\n ##\n ## customers = customers[np.argsort(customers, order=('ws', 't'))[::-1]]\n ## first = customers[0]\n ## last = customers[-1]\n ## customers = np.concatenate(([last], customers[1:-1], [first]))\n ###############\n \n solution = Solution(len(customers_list))\n \n solution.set_graph(graph)\n solution.set_solution(customers['id'])\n \n \n start_time = int(customers['ws'][1] - customers['t'][1])\n if start_time < 0:\n start_time = 0\n\n solution.set_start_time(start_time)\n \n curr_time = start_time\n for i, c_row in enumerate(solution.get_solution()):\n customer = next((c for c in customers_list if c.get_row() == c_row), None)\n time_visited = curr_time + customers['t'][i]\n\n if time_visited < customer.get_window_start():\n time_visited = customer.get_window_start()\n \n customer.set_time_visited(int(time_visited))\n curr_time = time_visited\n \n solution.set_customer_list(customers_list)\n \n solution.compute_validity()\n \n return solution\n \ndef perturbation(solution, level):\n solution_new = Solution(solution.get_solution().size, solution=solution) \n \n min_index = solution_new.get_solution().size * 10000\n for i in range(level):\n index_origin = random.randint(1, solution_new.get_solution().size - 1)\n index_new = random.randint(1, solution_new.get_solution().size - 1)\n \n curr_min = min(index_origin, index_new)\n \n if curr_min < min_index:\n min_index = curr_min\n \n solution_new.one_shift(index_origin, index_new)\n \n solution_new.recompute_validity(min_index)\n solution_new.compute_validity()\n \n return solution_new\n \ndef local1shift(solution):\n customers_validity = solution.get_valid_customers()\n \n valid_customers = np.where(customers_validity == 1)[0]\n violated_customers = np.where(customers_validity == 0)[0]\n \n better_solution = None\n min_index = customers_validity.size * 10000\n\n # Backward movement of violated customers \n for i in violated_customers:\n index_origin = i\n for j in range(i - 1, 0, -1):\n index_new = j\n \n if not solution.is_arc_valid(i, j):\n break\n \n solution_new = Solution(solution.get_solution().size, solution=solution) \n solution_new.one_shift(index_origin, index_new)\n min_ = min(index_origin, index_new)\n\n if min_ < min_index: \n min_index = min_\n \n if solution_new.get_constructive_obj() > 
solution.get_constructive_obj():\n better_solution = solution_new\n break\n \n if better_solution is not None:\n break\n \n if better_solution is None:\n # Forward movement of non-violated customers\n for i in valid_customers:\n # Depot can't be moved\n if solution.get_solution()[i] == 0:\n continue\n\n index_origin = i\n for j in range(i + 1, customers_validity.size):\n index_new = j\n \n if not solution.is_arc_valid(j, i):\n break\n \n solution_new = Solution(solution.get_solution().size, solution=solution) \n solution_new.one_shift(index_origin, index_new)\n min_ = min(index_origin, index_new)\n\n if min_ < min_index: \n min_index = min_\n \n if solution_new.get_constructive_obj() > solution.get_constructive_obj():\n better_solution = solution_new\n break\n \n if better_solution is not None:\n break\n \n if better_solution is None:\n # Backward movement of non-violated customers\n for i in valid_customers:\n # Depot can't be moved\n if solution.get_solution()[i] == 0:\n continue\n\n index_origin = i\n for j in range(i - 1, 0, -1):\n index_new = j\n \n if not solution.is_arc_valid(i, j):\n break\n \n solution_new = Solution(solution.get_solution().size, solution=solution) \n solution_new.one_shift(index_origin, index_new)\n min_ = min(index_origin, index_new)\n\n if min_ < min_index: \n min_index = min_\n \n if solution_new.get_constructive_obj() > solution.get_constructive_obj():\n better_solution = solution_new\n break\n \n if better_solution is not None:\n break\n \n if better_solution is None:\n # Forward movement of violated customers\n for i in violated_customers:\n index_origin = i\n \n for j in range(i + 1, customers_validity.size):\n index_new = j\n \n if not solution.is_arc_valid(j, i):\n break\n \n solution_new = Solution(solution.get_solution().size, solution=solution) \n solution_new.one_shift(index_origin, index_new)\n min_ = min(index_origin, index_new)\n\n if min_ < min_index: \n min_index = min_\n \n if solution_new.get_constructive_obj() > solution.get_constructive_obj():\n better_solution = solution_new\n break\n \n if better_solution is not None:\n break\n \n if better_solution is None:\n better_solution = solution\n\n better_solution.recompute_validity(min_index)\n better_solution.compute_validity()\n\n return better_solution" ]
[ [ "numpy.concatenate", "numpy.where", "numpy.argsort" ] ]
arthur801031/3d-multi-resolution-rcnn
[ "8e5454a72f8daa174bf3eabfa5964152f04ab287", "8e5454a72f8daa174bf3eabfa5964152f04ab287" ]
[ "mmdet/models/backbones/unet3d.py", "mmdet/core/bbox/bbox_target.py" ]
[ "# based on implementation: https://github.com/usuyama/pytorch-unet/blob/master/pytorch_unet.py\n\nfrom ..registry import BACKBONES\n\nimport torch\nimport torch.nn as nn\n\ndef double_conv(in_channels, out_channels):\n return nn.Sequential(\n nn.Conv3d(in_channels, out_channels, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv3d(out_channels, out_channels, 3, padding=1),\n nn.ReLU(inplace=True)\n ) \n\[email protected]_module\nclass UNet3D(nn.Module):\n\n def __init__(self):\n super(UNet3D, self).__init__()\n \n self.dconv_down1 = double_conv(3, 16)\n self.dconv_down2 = double_conv(16, 32)\n self.dconv_down3 = double_conv(32, 64)\n self.dconv_down4 = double_conv(64, 128) \n\n self.maxpool = nn.MaxPool3d(2)\n # self.upsample = nn.functional.interpolate(scale_factor=2, mode='trilinear', align_corners=True) \n \n self.dconv_up3 = double_conv(64 + 128, 64)\n self.dconv_up2 = double_conv(32 + 64, 32)\n self.dconv_up1 = double_conv(32 + 16, 16)\n \n # self.conv_last = nn.Conv2d(64, n_class, 1)\n \n def init_weights(self, pretrained=None):\n pass\n\n def forward(self, x):\n conv1 = self.dconv_down1(x)\n x = self.maxpool(conv1)\n\n conv2 = self.dconv_down2(x)\n x = self.maxpool(conv2)\n \n conv3 = self.dconv_down3(x)\n x = self.maxpool(conv3) \n \n x = self.dconv_down4(x)\n \n x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True) \n x = torch.cat([x, conv3], dim=1)\n \n x = self.dconv_up3(x)\n x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True) \n x = torch.cat([x, conv2], dim=1) \n\n x = self.dconv_up2(x)\n x = nn.functional.interpolate(x, scale_factor=2, mode='trilinear', align_corners=True) \n x = torch.cat([x, conv1], dim=1) \n \n x = self.dconv_up1(x)\n \n return x", "import torch\n\nfrom .transforms import bbox2delta, bbox2delta3d\nfrom ..utils import multi_apply\n\n\ndef bbox_target(pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n concat=True):\n labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n bbox_target_single,\n pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg=cfg,\n reg_classes=reg_classes,\n target_means=target_means,\n target_stds=target_stds)\n\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n return labels, label_weights, bbox_targets, bbox_weights\n\ndef bbox_target_3d(pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n concat=True):\n labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n bbox_target_single_3d,\n pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg=cfg,\n reg_classes=reg_classes,\n target_means=target_means,\n target_stds=target_stds)\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n return labels, label_weights, bbox_targets, bbox_weights\n\ndef bbox_target_3d_parcel(pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n pos_gt_bregions_list,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 
1.0],\n concat=True):\n labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights = multi_apply(\n bbox_target_single_3d_parcel,\n pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n pos_gt_bregions_list,\n cfg=cfg,\n reg_classes=reg_classes,\n target_means=target_means,\n target_stds=target_stds)\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n bregions = torch.cat(bregions, 0)\n bregion_weights = torch.cat(bregion_weights, 0)\n return labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights\n\ndef bbox_target_single(pos_bboxes,\n neg_bboxes,\n pos_gt_bboxes,\n pos_gt_labels,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0]):\n num_pos = pos_bboxes.size(0)\n num_neg = neg_bboxes.size(0)\n num_samples = num_pos + num_neg\n labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)\n label_weights = pos_bboxes.new_zeros(num_samples)\n bbox_targets = pos_bboxes.new_zeros(num_samples, 4)\n bbox_weights = pos_bboxes.new_zeros(num_samples, 4)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means,\n target_stds)\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n\n return labels, label_weights, bbox_targets, bbox_weights\n\ndef bbox_target_single_3d(pos_bboxes,\n neg_bboxes,\n pos_gt_bboxes,\n pos_gt_labels,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]):\n num_pos = pos_bboxes.size(0)\n num_neg = neg_bboxes.size(0)\n num_samples = num_pos + num_neg\n labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)\n label_weights = pos_bboxes.new_zeros(num_samples)\n bbox_targets = pos_bboxes.new_zeros(num_samples, 6)\n bbox_weights = pos_bboxes.new_zeros(num_samples, 6)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n pos_bbox_targets = bbox2delta3d(pos_bboxes, pos_gt_bboxes, target_means,\n target_stds)\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n\n # if torch.isnan(bbox_targets).any().item() == 1:\n # breakpoint()\n return labels, label_weights, bbox_targets, bbox_weights\n\ndef bbox_target_single_3d_parcel(pos_bboxes,\n neg_bboxes,\n pos_gt_bboxes,\n pos_gt_labels,\n pos_gt_bregions,\n cfg,\n reg_classes=1,\n target_means=[.0, .0, .0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]):\n num_pos = pos_bboxes.size(0)\n num_neg = neg_bboxes.size(0)\n num_samples = num_pos + num_neg\n labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)\n bregions = pos_bboxes.new_zeros(num_samples, dtype=torch.long)\n label_weights = pos_bboxes.new_zeros(num_samples)\n bregion_weights = pos_bboxes.new_zeros(num_samples)\n bbox_targets = pos_bboxes.new_zeros(num_samples, 6)\n bbox_weights = pos_bboxes.new_zeros(num_samples, 6)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n bregions[:num_pos] = pos_gt_bregions\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n bregion_weights[:num_pos] = 
pos_weight\n pos_bbox_targets = bbox2delta3d(pos_bboxes, pos_gt_bboxes, target_means,\n target_stds)\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n bregion_weights[-num_neg:] = 1.0\n\n # if torch.isnan(bbox_targets).any().item() == 1:\n # breakpoint()\n return labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights\n\n\ndef expand_target(bbox_targets, bbox_weights, labels, num_classes):\n breakpoint()\n bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0),\n 4 * num_classes))\n bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0),\n 4 * num_classes))\n for i in torch.nonzero(labels > 0).squeeze(-1):\n start, end = labels[i] * 4, (labels[i] + 1) * 4\n bbox_targets_expand[i, start:end] = bbox_targets[i, :]\n bbox_weights_expand[i, start:end] = bbox_weights[i, :]\n return bbox_targets_expand, bbox_weights_expand\n" ]
[ [ "torch.cat", "torch.nn.MaxPool3d", "torch.nn.functional.interpolate", "torch.nn.ReLU", "torch.nn.Conv3d" ], [ "torch.nonzero", "torch.cat" ] ]
DefTruth/tensorpack
[ "df82c65a29883984a04a75885e0475df19ca4f19" ]
[ "examples/FasterRCNN/predict.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport argparse\nimport itertools\nimport numpy as np\nimport os\nimport shutil\nimport tensorflow as tf\nimport cv2\nimport six\nimport tqdm\n\nassert six.PY3, \"This example requires Python 3!\"\n\nimport tensorpack.utils.viz as tpviz\nfrom tensorpack.predict import MultiTowerOfflinePredictor, OfflinePredictor, PredictConfig\nfrom tensorpack.tfutils import get_model_loader, get_tf_version_tuple\nfrom tensorpack.utils import fs, logger\n\nfrom dataset import DatasetRegistry, register_coco\nfrom config import config as cfg\nfrom config import finalize_configs\nfrom data import get_eval_dataflow, get_train_dataflow\nfrom eval import DetectionResult, multithread_predict_dataflow, predict_image\nfrom modeling.generalized_rcnn import ResNetC4Model, ResNetFPNModel\nfrom viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall\n\n\ndef do_visualize(model, model_path, nr_visualize=100, output_dir='output'):\n \"\"\"\n Visualize some intermediate results (proposals, raw predictions) inside the pipeline.\n \"\"\"\n df = get_train_dataflow()\n df.reset_state()\n\n pred = OfflinePredictor(PredictConfig(\n model=model,\n session_init=get_model_loader(model_path),\n input_names=['image', 'gt_boxes', 'gt_labels'],\n output_names=[\n 'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'fastrcnn_all_scores',\n 'output/boxes',\n 'output/scores',\n 'output/labels',\n ]))\n\n if os.path.isdir(output_dir):\n shutil.rmtree(output_dir)\n fs.mkdir_p(output_dir)\n with tqdm.tqdm(total=nr_visualize) as pbar:\n for idx, dp in itertools.islice(enumerate(df), nr_visualize):\n img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']\n\n rpn_boxes, rpn_scores, all_scores, \\\n final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)\n\n # draw groundtruth boxes\n gt_viz = draw_annotation(img, gt_boxes, gt_labels)\n # draw best proposals for each groundtruth, to show recall\n proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)\n # draw the scores for the above proposals\n score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])\n\n results = [DetectionResult(*args) for args in\n zip(final_boxes, final_scores, final_labels,\n [None] * len(final_labels))]\n final_viz = draw_final_outputs(img, results)\n\n viz = tpviz.stack_patches([\n gt_viz, proposal_viz,\n score_viz, final_viz], 2, 2)\n\n if os.environ.get('DISPLAY', None):\n tpviz.interactive_imshow(viz)\n cv2.imwrite(\"{}/{:03d}.png\".format(output_dir, idx), viz)\n pbar.update()\n\n\ndef do_evaluate(pred_config, output_file):\n num_tower = max(cfg.TRAIN.NUM_GPUS, 1)\n graph_funcs = MultiTowerOfflinePredictor(\n pred_config, list(range(num_tower))).get_predictors()\n\n for dataset in cfg.DATA.VAL:\n logger.info(\"Evaluating {} ...\".format(dataset))\n dataflows = [\n get_eval_dataflow(dataset, shard=k, num_shards=num_tower)\n for k in range(num_tower)]\n all_results = multithread_predict_dataflow(dataflows, graph_funcs)\n output = output_file + '-' + dataset\n DatasetRegistry.get(dataset).eval_inference_results(all_results, output)\n\n\ndef do_predict(pred_func, input_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n results = predict_image(img, pred_func)\n final = draw_final_outputs(img, results)\n viz = np.concatenate((img, final), axis=1)\n cv2.imwrite(\"output.png\", 
viz)\n logger.info(\"Inference output for {} written to output.png\".format(input_file))\n tpviz.interactive_imshow(viz)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load', help='load a model for evaluation.', required=True)\n parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')\n parser.add_argument('--evaluate', help=\"Run evaluation. \"\n \"This argument is the path to the output json evaluation file\")\n parser.add_argument('--predict', help=\"Run prediction on a given image. \"\n \"This argument is the path to the input image file\", nargs='+')\n parser.add_argument('--benchmark', action='store_true', help=\"Benchmark the speed of the model + postprocessing\")\n parser.add_argument('--config', help=\"A list of KEY=VALUE to overwrite those defined in config.py\",\n nargs='+')\n\n args = parser.parse_args()\n if args.config:\n cfg.update_args(args.config)\n register_coco(cfg.DATA.BASEDIR) # add COCO datasets to the registry\n MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()\n\n if not tf.test.is_gpu_available():\n from tensorflow.python.framework import test_util\n assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \\\n \"Inference requires either GPU support or MKL support!\"\n assert args.load\n finalize_configs(is_training=False)\n\n if args.predict or args.visualize:\n cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS\n\n if args.visualize:\n do_visualize(MODEL, args.load)\n else:\n predcfg = PredictConfig(\n model=MODEL,\n session_init=get_model_loader(args.load),\n input_names=MODEL.get_inference_tensor_names()[0],\n output_names=MODEL.get_inference_tensor_names()[1])\n if args.predict:\n predictor = OfflinePredictor(predcfg)\n for image_file in args.predict:\n do_predict(predictor, image_file)\n elif args.evaluate:\n assert args.evaluate.endswith('.json'), args.evaluate\n do_evaluate(predcfg, args.evaluate)\n elif args.benchmark:\n df = get_eval_dataflow(cfg.DATA.VAL[0])\n df.reset_state()\n predictor = OfflinePredictor(predcfg)\n for img in tqdm.tqdm(df, total=len(df)):\n # This include post-processing time, which is done on CPU and not optimized\n # To exclude it, modify `predict_image`.\n predict_image(img[0], predictor)\n" ]
[ [ "numpy.concatenate", "tensorflow.python.framework.test_util.IsMklEnabled", "tensorflow.test.is_gpu_available" ] ]
aitoehigie/britecore_flask
[ "eef1873dbe6b2cc21f770bc6dec783007ae4493b" ]
[ "venv/lib/python3.6/site-packages/pylint/test/functional/undefined_variable.py" ]
[ "# pylint: disable=missing-docstring, multiple-statements, useless-object-inheritance\n# pylint: disable=too-few-public-methods, no-init, no-self-use,bare-except,broad-except, import-error\nfrom __future__ import print_function\n\nDEFINED = 1\n\nif DEFINED != 1:\n if DEFINED in (unknown, DEFINED): # [undefined-variable]\n DEFINED += 1\n\n\ndef in_method(var):\n \"\"\"method doc\"\"\"\n var = nomoreknown # [undefined-variable]\n assert var\n\n\nDEFINED = {DEFINED: __revision__} # [undefined-variable]\n# +1:[undefined-variable]\nDEFINED[__revision__] = OTHER = \"move this is astroid test\"\n\nOTHER += \"$\"\n\n\ndef bad_default(var, default=unknown2): # [undefined-variable]\n \"\"\"function with defaut arg's value set to an unexistant name\"\"\"\n print(var, default)\n print(xxxx) # [undefined-variable]\n augvar += 1 # [undefined-variable]\n del vardel # [undefined-variable]\n\n\nLMBD = lambda x, y=doesnotexist: x + y # [undefined-variable]\nLMBD2 = lambda x, y: x + z # [undefined-variable]\n\ntry:\n POUET # don't catch me\nexcept NameError:\n POUET = \"something\"\n\ntry:\n POUETT # [used-before-assignment]\nexcept Exception: # pylint:disable = broad-except\n POUETT = \"something\"\n\ntry:\n POUETTT # don't catch me\nexcept: # pylint:disable = bare-except\n POUETTT = \"something\"\n\nprint(POUET, POUETT, POUETTT)\n\n\ntry:\n PLOUF # [used-before-assignment]\nexcept ValueError:\n PLOUF = \"something\"\n\nprint(PLOUF)\n\n\ndef if_branch_test(something):\n \"\"\"hop\"\"\"\n if something == 0:\n if xxx == 1: # [used-before-assignment]\n pass\n else:\n print(xxx)\n xxx = 3\n\n\ndef decorator(arg):\n \"\"\"Decorator with one argument.\"\"\"\n return lambda: list(arg)\n\n\n@decorator(arg=[i * 2 for i in range(15)])\ndef func1():\n \"\"\"A function with a decorator that contains a listcomp.\"\"\"\n\n\n@decorator(arg=(i * 2 for i in range(15)))\ndef func2():\n \"\"\"A function with a decorator that contains a genexpr.\"\"\"\n\n\n@decorator(lambda x: x > 0)\ndef main():\n \"\"\"A function with a decorator that contains a lambda.\"\"\"\n\n\n# Test shared scope.\n\n\ndef test_arguments(arg=TestClass): # [used-before-assignment]\n \"\"\" TestClass isn't defined yet. \"\"\"\n return arg\n\n\nclass TestClass(Ancestor): # [used-before-assignment]\n \"\"\" contains another class, which uses an undefined ancestor. \"\"\"\n\n class MissingAncestor(Ancestor1): # [used-before-assignment]\n \"\"\" no op \"\"\"\n\n def test1(self):\n \"\"\" It should trigger here, because the two classes\n have the same scope.\n \"\"\"\n\n class UsingBeforeDefinition(Empty): # [used-before-assignment]\n \"\"\" uses Empty before definition \"\"\"\n\n class Empty(object):\n \"\"\" no op \"\"\"\n\n return UsingBeforeDefinition\n\n def test(self):\n \"\"\" Ancestor isn't defined yet, but we don't care. \"\"\"\n\n class MissingAncestor1(Ancestor):\n \"\"\" no op \"\"\"\n\n return MissingAncestor1\n\n\nclass Self(object):\n \"\"\" Detect when using the same name inside the class scope. \"\"\"\n\n obj = Self # [undefined-variable]\n\n\nclass Self1(object):\n \"\"\" No error should be raised here. 
\"\"\"\n\n def test(self):\n \"\"\" empty \"\"\"\n return Self1\n\n\nclass Ancestor(object):\n \"\"\" No op \"\"\"\n\n\nclass Ancestor1(object):\n \"\"\" No op \"\"\"\n\n\nNANA = BAT # [undefined-variable]\ndel BAT\n\n\nclass KeywordArgument(object):\n \"\"\"Test keyword arguments.\"\"\"\n\n enable = True\n\n def test(self, is_enabled=enable):\n \"\"\"do nothing.\"\"\"\n\n def test1(self, is_enabled=enabled): # [used-before-assignment]\n \"\"\"enabled is undefined at this point, but it is used before assignment.\"\"\"\n\n def test2(self, is_disabled=disabled): # [undefined-variable]\n \"\"\"disabled is undefined\"\"\"\n\n enabled = True\n\n func = lambda arg=arg: arg * arg # [undefined-variable]\n\n arg2 = 0\n func2 = lambda arg2=arg2: arg2 * arg2\n\n\n# Don't emit if the code is protected by NameError\ntry:\n unicode_1\nexcept NameError:\n pass\n\ntry:\n unicode_2 # [undefined-variable]\nexcept Exception:\n pass\n\ntry:\n unicode_3\nexcept:\n pass\n\ntry:\n unicode_4 # [undefined-variable]\nexcept ValueError:\n pass\n\n# See https://bitbucket.org/logilab/pylint/issue/111/\ntry:\n raise IOError(1, \"a\")\nexcept IOError as err:\n print(err)\n\n\ndef test_conditional_comprehension():\n methods = [\"a\", \"b\", \"_c\", \"_d\"]\n my_methods = sum(1 for method in methods if not method.startswith(\"_\"))\n return my_methods\n\n\nclass MyError(object):\n pass\n\n\nclass MyClass(object):\n class MyError(MyError):\n pass\n\n\ndef dec(inp):\n def inner(func):\n print(inp)\n return func\n\n return inner\n\n\n# Make sure lambdas with expressions\n# referencing parent class do not raise undefined variable\n# because at the time of their calling, the class name will\n# be populated\n# See https://github.com/PyCQA/pylint/issues/704\nclass LambdaClass:\n myattr = 1\n mylambda = lambda: LambdaClass.myattr\n\n\n# Need different classes to make sure\n# consumed variables don't get in the way\nclass LambdaClass2:\n myattr = 1\n # Different base_scope scope but still applies\n mylambda2 = lambda: [LambdaClass2.myattr for _ in [1, 2]]\n\n\nclass LambdaClass3:\n myattr = 1\n # Nested default argument in lambda\n # Should not raise error\n mylambda3 = lambda: lambda a=LambdaClass3: a\n\n\nclass LambdaClass4:\n myattr = 1\n mylambda4 = lambda a=LambdaClass4: lambda: a # [undefined-variable]\n\n\n# Make sure the first lambda does not consume the LambdaClass5 class\n# name although the expression is is valid\n# Consuming the class would cause the subsequent undefined-variable to be masked\nclass LambdaClass5:\n myattr = 1\n mylambda = lambda: LambdaClass5.myattr\n mylambda4 = lambda a=LambdaClass5: lambda: a # [undefined-variable]\n\n\ndef nonlocal_in_ifexp():\n import matplotlib.pyplot as plt\n\n def onclick(event):\n if event:\n nonlocal i\n i += 1\n print(i)\n\n i = 0\n fig = plt.figure()\n fig.canvas.mpl_connect(\"button_press_event\", onclick)\n plt.show(block=True)\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
QuadCtrl/quad-ctrl
[ "ed1a6b7ee747a7ab045f9591b4747c6a2fe0a2f4" ]
[ "gym_pybullet_drones/envs/BaseAviary.py" ]
[ "import os\nfrom sys import platform\nimport time\nimport collections\nfrom datetime import datetime\nfrom enum import Enum\nimport xml.etree.ElementTree as etxml\nfrom PIL import Image\n# import pkgutil\n# egl = pkgutil.get_loader('eglRenderer')\nimport numpy as np\nimport pybullet as p\nimport pybullet_data\nimport gym\n\nclass DroneModel(Enum):\n \"\"\"Drone models enumeration class.\"\"\"\n\n CF2X = \"cf2x\" # Bitcraze Craziflie 2.0 in the X configuration\n CF2P = \"cf2p\" # Bitcraze Craziflie 2.0 in the + configuration\n HB = \"hb\" # Generic quadrotor (with AscTec Hummingbird inertial properties)\n\n################################################################################\n\nclass Physics(Enum):\n \"\"\"Physics implementations enumeration class.\"\"\"\n\n PYB = \"pyb\" # Base PyBullet physics update\n DYN = \"dyn\" # Update with an explicit model of the dynamics\n PYB_GND = \"pyb_gnd\" # PyBullet physics update with ground effect\n PYB_DRAG = \"pyb_drag\" # PyBullet physics update with drag\n PYB_DW = \"pyb_dw\" # PyBullet physics update with downwash\n PYB_GND_DRAG_DW = \"pyb_gnd_drag_dw\" # PyBullet physics update with ground effect, drag, and downwash\n\n################################################################################\n\nclass ImageType(Enum):\n \"\"\"Camera capture image type enumeration class.\"\"\"\n\n RGB = 0 # Red, green, blue (and alpha)\n DEP = 1 # Depth\n SEG = 2 # Segmentation by object id\n BW = 3 # Black and white\n\n################################################################################\n\nclass BaseAviary(gym.Env):\n \"\"\"Base class for \"drone aviary\" Gym environments.\"\"\"\n\n metadata = {'render.modes': ['human']}\n \n ################################################################################\n\n def __init__(self,\n drone_model: DroneModel=DroneModel.CF2X,\n num_drones: int=1,\n neighbourhood_radius: float=np.inf,\n initial_xyzs=None,\n initial_rpys=None,\n physics: Physics=Physics.PYB,\n freq: int=240,\n aggregate_phy_steps: int=1,\n gui=False,\n record=False,\n obstacles=False,\n user_debug_gui=True,\n vision_attributes=False,\n dynamics_attributes=False\n ):\n \"\"\"Initialization of a generic aviary environment.\n\n Parameters\n ----------\n drone_model : DroneModel, optional\n The desired drone type (detailed in an .urdf file in folder `assets`).\n num_drones : int, optional\n The desired number of drones in the aviary.\n neighbourhood_radius : float, optional\n Radius used to compute the drones' adjacency matrix, in meters.\n initial_xyzs: ndarray | None, optional\n (NUM_DRONES, 3)-shaped array containing the initial XYZ position of the drones.\n initial_rpys: ndarray | None, optional\n (NUM_DRONES, 3)-shaped array containing the initial orientations of the drones (in radians).\n physics : Physics, optional\n The desired implementation of PyBullet physics/custom dynamics.\n freq : int, optional\n The frequency (Hz) at which the physics engine steps.\n aggregate_phy_steps : int, optional\n The number of physics steps within one call to `BaseAviary.step()`.\n gui : bool, optional\n Whether to use PyBullet's GUI.\n record : bool, optional\n Whether to save a video of the simulation in folder `files/videos/`.\n obstacles : bool, optional\n Whether to add obstacles to the simulation.\n user_debug_gui : bool, optional\n Whether to draw the drones' axes and the GUI RPMs sliders.\n vision_attributes : bool, optional\n Whether to allocate the attributes needed by vision-based aviary subclasses.\n dynamics_attributes : bool, 
optional\n Whether to allocate the attributes needed by subclasses accepting thrust and torques inputs.\n\n \"\"\"\n #### Constants #############################################\n self.G = 9.8\n self.RAD2DEG = 180/np.pi\n self.DEG2RAD = np.pi/180\n self.SIM_FREQ = freq\n self.TIMESTEP = 1./self.SIM_FREQ\n self.AGGR_PHY_STEPS = aggregate_phy_steps\n #### Parameters ############################################\n self.NUM_DRONES = num_drones\n self.NEIGHBOURHOOD_RADIUS = neighbourhood_radius\n #### Options ###############################################\n self.DRONE_MODEL = drone_model\n self.GUI = gui\n self.RECORD = record\n self.PHYSICS = physics\n self.OBSTACLES = obstacles\n self.USER_DEBUG = user_debug_gui\n self.URDF = self.DRONE_MODEL.value + \".urdf\"\n #### Load the drone properties from the .urdf file #########\n self.M, \\\n self.L, \\\n self.THRUST2WEIGHT_RATIO, \\\n self.J, \\\n self.J_INV, \\\n self.KF, \\\n self.KM, \\\n self.COLLISION_H,\\\n self.COLLISION_R, \\\n self.COLLISION_Z_OFFSET, \\\n self.MAX_SPEED_KMH, \\\n self.GND_EFF_COEFF, \\\n self.PROP_RADIUS, \\\n self.DRAG_COEFF, \\\n self.DW_COEFF_1, \\\n self.DW_COEFF_2, \\\n self.DW_COEFF_3 = self._parseURDFParameters()\n print(\"[INFO] BaseAviary.__init__() loaded parameters from the drone's .urdf:\\n[INFO] m {:f}, L {:f},\\n[INFO] ixx {:f}, iyy {:f}, izz {:f},\\n[INFO] kf {:f}, km {:f},\\n[INFO] t2w {:f}, max_speed_kmh {:f},\\n[INFO] gnd_eff_coeff {:f}, prop_radius {:f},\\n[INFO] drag_xy_coeff {:f}, drag_z_coeff {:f},\\n[INFO] dw_coeff_1 {:f}, dw_coeff_2 {:f}, dw_coeff_3 {:f}\".format(\n self.M, self.L, self.J[0,0], self.J[1,1], self.J[2,2], self.KF, self.KM, self.THRUST2WEIGHT_RATIO, self.MAX_SPEED_KMH, self.GND_EFF_COEFF, self.PROP_RADIUS, self.DRAG_COEFF[0], self.DRAG_COEFF[2], self.DW_COEFF_1, self.DW_COEFF_2, self.DW_COEFF_3))\n #### Compute constants #####################################\n self.GRAVITY = self.G*self.M\n self.HOVER_RPM = np.sqrt(self.GRAVITY / (4*self.KF))\n self.MAX_RPM = np.sqrt((self.THRUST2WEIGHT_RATIO*self.GRAVITY) / (4*self.KF))\n self.MAX_THRUST = (4*self.KF*self.MAX_RPM**2)\n if self.DRONE_MODEL == DroneModel.CF2X:\n self.MAX_XY_TORQUE = (2*self.L*self.KF*self.MAX_RPM**2)/np.sqrt(2)\n elif self.DRONE_MODEL in [DroneModel.CF2P, DroneModel.HB]:\n self.MAX_XY_TORQUE = (self.L*self.KF*self.MAX_RPM**2)\n self.MAX_Z_TORQUE = (2*self.KM*self.MAX_RPM**2)\n self.GND_EFF_H_CLIP = 0.25 * self.PROP_RADIUS * np.sqrt((15 * self.MAX_RPM**2 * self.KF * self.GND_EFF_COEFF) / self.MAX_THRUST)\n #### Create attributes for vision tasks ####################\n self.VISION_ATTR = vision_attributes\n if self.VISION_ATTR:\n self.IMG_RES = np.array([64, 48])\n self.IMG_FRAME_PER_SEC = 24\n self.IMG_CAPTURE_FREQ = int(self.SIM_FREQ/self.IMG_FRAME_PER_SEC)\n self.rgb = np.zeros(((self.NUM_DRONES, self.IMG_RES[1], self.IMG_RES[0], 4)))\n self.dep = np.ones(((self.NUM_DRONES, self.IMG_RES[1], self.IMG_RES[0])))\n self.seg = np.zeros(((self.NUM_DRONES, self.IMG_RES[1], self.IMG_RES[0])))\n if self.IMG_CAPTURE_FREQ%self.AGGR_PHY_STEPS != 0:\n print(\"[ERROR] in BaseAviary.__init__(), aggregate_phy_steps incompatible with the desired video capture frame rate ({:f}Hz)\".format(self.IMG_FRAME_PER_SEC))\n exit()\n if self.RECORD:\n self.ONBOARD_IMG_PATH = os.path.dirname(os.path.abspath(__file__))+\"/../../files/videos/onboard-\"+datetime.now().strftime(\"%m.%d.%Y_%H.%M.%S\")+\"/\"\n os.makedirs(os.path.dirname(self.ONBOARD_IMG_PATH), exist_ok=True)\n #### Create attributes for dynamics control inputs #########\n 
self.DYNAMICS_ATTR = dynamics_attributes\n if self.DYNAMICS_ATTR:\n if self.DRONE_MODEL == DroneModel.CF2X:\n self.A = np.array([ [1, 1, 1, 1], [1/np.sqrt(2), 1/np.sqrt(2), -1/np.sqrt(2), -1/np.sqrt(2)], [-1/np.sqrt(2), 1/np.sqrt(2), 1/np.sqrt(2), -1/np.sqrt(2)], [-1, 1, -1, 1] ])\n elif self.DRONE_MODEL in [DroneModel.CF2P, DroneModel.HB]:\n self.A = np.array([ [1, 1, 1, 1], [0, 1, 0, -1], [-1, 0, 1, 0], [-1, 1, -1, 1] ])\n self.INV_A = np.linalg.inv(self.A)\n self.B_COEFF = np.array([1/self.KF, 1/(self.KF*self.L), 1/(self.KF*self.L), 1/self.KM])\n #### Connect to PyBullet ###################################\n if self.GUI:\n #### With debug GUI ########################################\n self.CLIENT = p.connect(p.GUI) # p.connect(p.GUI, options=\"--opengl2\")\n for i in [p.COV_ENABLE_RGB_BUFFER_PREVIEW, p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW]:\n p.configureDebugVisualizer(i, 0, physicsClientId=self.CLIENT)\n p.resetDebugVisualizerCamera(cameraDistance=1.45,\n cameraYaw=-30,\n cameraPitch=-30,\n cameraTargetPosition=[0, 0, 0.5],\n physicsClientId=self.CLIENT\n )\n ret = p.getDebugVisualizerCamera(physicsClientId=self.CLIENT)\n print(\"viewMatrix\", ret[2])\n print(\"projectionMatrix\", ret[3])\n if self.USER_DEBUG:\n #### Add input sliders to the GUI ##########################\n self.SLIDERS = -1*np.ones(4)\n for i in range(4):\n self.SLIDERS[i] = p.addUserDebugParameter(\"Propeller \"+str(i)+\" RPM\", 0, self.MAX_RPM, self.HOVER_RPM, physicsClientId=self.CLIENT)\n self.INPUT_SWITCH = p.addUserDebugParameter(\"Use GUI RPM\", 9999, -1, 0, physicsClientId=self.CLIENT)\n else:\n #### Without debug GUI #####################################\n self.CLIENT = p.connect(p.DIRECT)\n #### Uncomment the following line to use EGL Render Plugin #\n #### Instead of TinyRender (CPU-based) in PYB's Direct mode\n # if platform == \"linux\": p.setAdditionalSearchPath(pybullet_data.getDataPath()); plugin = p.loadPlugin(egl.get_filename(), \"_eglRendererPlugin\"); print(\"plugin=\", plugin)\n if self.RECORD:\n #### Set the camera parameters to save frames in DIRECT mode\n self.VID_WIDTH=int(640)\n self.VID_HEIGHT=int(480)\n self.FRAME_PER_SEC = 24\n self.CAPTURE_FREQ = int(self.SIM_FREQ/self.FRAME_PER_SEC)\n self.CAM_VIEW = p.computeViewMatrixFromYawPitchRoll(distance=2.8,\n yaw=-30,\n pitch=-30,\n roll=0,\n cameraTargetPosition=[0, 0, 1],\n upAxisIndex=2,\n physicsClientId=self.CLIENT\n )\n self.CAM_PRO = p.computeProjectionMatrixFOV(fov=60.0,\n aspect=self.VID_WIDTH/self.VID_HEIGHT,\n nearVal=0.1,\n farVal=1000.0\n )\n #### Set initial poses #####################################\n if initial_xyzs is None:\n self.INIT_XYZS = np.vstack([np.array([x*4*self.L for x in range(self.NUM_DRONES)]), \\\n np.array([y*4*self.L for y in range(self.NUM_DRONES)]), \\\n np.ones(self.NUM_DRONES) * (self.COLLISION_H/2-self.COLLISION_Z_OFFSET+.1)]).transpose().reshape(self.NUM_DRONES, 3)\n elif np.array(initial_xyzs).shape == (self.NUM_DRONES, 3):\n self.INIT_XYZS = initial_xyzs\n else:\n print(\"[ERROR] invalid initial_xyzs in BaseAviary.__init__(), try initial_xyzs.reshape(NUM_DRONES,3)\")\n if initial_rpys is None:\n self.INIT_RPYS = np.zeros((self.NUM_DRONES, 3))\n elif np.array(initial_rpys).shape == (self.NUM_DRONES, 3):\n self.INIT_RPYS = initial_rpys\n else:\n print(\"[ERROR] invalid initial_rpys in BaseAviary.__init__(), try initial_rpys.reshape(NUM_DRONES,3)\")\n #### Create action and observation spaces ##################\n self.action_space = self._actionSpace()\n 
self.observation_space = self._observationSpace()\n #### Housekeeping ##########################################\n self._housekeeping()\n #### Update and store the drones kinematic information #####\n self._updateAndStoreKinematicInformation()\n #### Start video recording #################################\n self._startVideoRecording()\n \n ################################################################################\n\n def reset(self):\n \"\"\"Resets the environment.\n\n Returns\n -------\n ndarray | dict[..]\n The initial observation, check the specific implementation of `_computeObs()`\n in each subclass for its format.\n\n \"\"\"\n p.resetSimulation(physicsClientId=self.CLIENT)\n #### Housekeeping ##########################################\n self._housekeeping()\n #### Update and store the drones kinematic information #####\n self._updateAndStoreKinematicInformation()\n #### Start video recording #################################\n self._startVideoRecording()\n #### Return the initial observation ########################\n return self._computeObs()\n \n ################################################################################\n\n def step(self,\n action\n ):\n \"\"\"Advances the environment by one simulation step.\n\n Parameters\n ----------\n action : ndarray | dict[..]\n The input action for one or more drones, translated into RPMs by\n the specific implementation of `_preprocessAction()` in each subclass.\n\n Returns\n -------\n ndarray | dict[..]\n The step's observation, check the specific implementation of `_computeObs()`\n in each subclass for its format.\n float | dict[..]\n The step's reward value(s), check the specific implementation of `_computeReward()`\n in each subclass for its format.\n bool | dict[..]\n Whether the current epoisode is over, check the specific implementation of `_computeDone()`\n in each subclass for its format.\n dict[..]\n Additional information as a dictionary, check the specific implementation of `_computeInfo()`\n in each subclass for its format.\n\n \"\"\"\n #### Save PNG video frames if RECORD=True and GUI=False ####\n if self.RECORD and not self.GUI and self.step_counter%self.CAPTURE_FREQ == 0:\n [w, h, rgb, dep, seg] = p.getCameraImage(width=self.VID_WIDTH,\n height=self.VID_HEIGHT,\n shadow=1,\n viewMatrix=self.CAM_VIEW,\n projectionMatrix=self.CAM_PRO,\n renderer=p.ER_TINY_RENDERER,\n flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,\n physicsClientId=self.CLIENT\n )\n (Image.fromarray(np.reshape(rgb, (h, w, 4)), 'RGBA')).save(self.IMG_PATH+\"frame_\"+str(self.FRAME_NUM)+\".png\")\n #### Save the depth or segmentation view instead #######\n # dep = ((dep-np.min(dep)) * 255 / (np.max(dep)-np.min(dep))).astype('uint8')\n # (Image.fromarray(np.reshape(dep, (h, w)))).save(self.IMG_PATH+\"frame_\"+str(self.FRAME_NUM)+\".png\")\n # seg = ((seg-np.min(seg)) * 255 / (np.max(seg)-np.min(seg))).astype('uint8')\n # (Image.fromarray(np.reshape(seg, (h, w)))).save(self.IMG_PATH+\"frame_\"+str(self.FRAME_NUM)+\".png\")\n self.FRAME_NUM += 1\n #### Read the GUI's input parameters #######################\n if self.GUI and self.USER_DEBUG:\n current_input_switch = p.readUserDebugParameter(self.INPUT_SWITCH, physicsClientId=self.CLIENT)\n if current_input_switch > self.last_input_switch:\n self.last_input_switch = current_input_switch\n self.USE_GUI_RPM = True if self.USE_GUI_RPM == False else False\n if self.USE_GUI_RPM:\n for i in range(4):\n self.gui_input[i] = p.readUserDebugParameter(int(self.SLIDERS[i]), physicsClientId=self.CLIENT)\n 
clipped_action = np.tile(self.gui_input, (self.NUM_DRONES, 1))\n if self.step_counter%(self.SIM_FREQ/2) == 0:\n self.GUI_INPUT_TEXT = [p.addUserDebugText(\"Using GUI RPM\",\n textPosition=[0, 0, 0],\n textColorRGB=[1, 0, 0],\n lifeTime=1,\n textSize=2,\n parentObjectUniqueId=self.DRONE_IDS[i],\n parentLinkIndex=-1,\n replaceItemUniqueId=int(self.GUI_INPUT_TEXT[i]),\n physicsClientId=self.CLIENT\n ) for i in range(self.NUM_DRONES)]\n #### Save, preprocess, and clip the action to the max. RPM #\n else:\n self._saveLastAction(action)\n clipped_action = np.reshape(self._preprocessAction(action), (self.NUM_DRONES, 4))\n #### Repeat for as many as the aggregate physics steps #####\n for _ in range(self.AGGR_PHY_STEPS):\n #### Update and store the drones kinematic info for certain\n #### Between aggregate steps for certain types of update ###\n if self.AGGR_PHY_STEPS > 1 and self.PHYSICS in [Physics.DYN, Physics.PYB_GND, Physics.PYB_DRAG, Physics.PYB_DW, Physics.PYB_GND_DRAG_DW]:\n self._updateAndStoreKinematicInformation()\n #### Step the simulation using the desired physics update ##\n for i in range (self.NUM_DRONES):\n if self.PHYSICS == Physics.PYB:\n self._physics(clipped_action[i, :], i)\n elif self.PHYSICS == Physics.DYN:\n self._dynamics(clipped_action[i, :], i)\n elif self.PHYSICS == Physics.PYB_GND:\n self._physics(clipped_action[i, :], i)\n self._groundEffect(clipped_action[i, :], i)\n elif self.PHYSICS == Physics.PYB_DRAG:\n self._physics(clipped_action[i, :], i)\n self._drag(self.last_clipped_action[i, :], i)\n elif self.PHYSICS == Physics.PYB_DW:\n self._physics(clipped_action[i, :], i)\n self._downwash(i)\n elif self.PHYSICS == Physics.PYB_GND_DRAG_DW:\n self._physics(clipped_action[i, :], i)\n self._groundEffect(clipped_action[i, :], i)\n self._drag(self.last_clipped_action[i, :], i)\n self._downwash(i)\n #### PyBullet computes the new state, unless Physics.DYN ###\n if self.PHYSICS != Physics.DYN:\n p.stepSimulation(physicsClientId=self.CLIENT)\n #### Save the last applied action (e.g. 
to compute drag) ###\n self.last_clipped_action = clipped_action\n #### Update and store the drones kinematic information #####\n self._updateAndStoreKinematicInformation()\n #### Prepare the return values #############################\n obs = self._computeObs()\n reward = self._computeReward()\n done = self._computeDone()\n info = self._computeInfo()\n #### Advance the step counter ##############################\n self.step_counter = self.step_counter + (1 * self.AGGR_PHY_STEPS)\n return obs, reward, done, info\n \n ################################################################################\n \n def render(self,\n mode='human',\n close=False\n ):\n \"\"\"Prints a textual output of the environment.\n\n Parameters\n ----------\n mode : str, optional\n Unused.\n close : bool, optional\n Unused.\n\n \"\"\"\n if self.first_render_call and not self.GUI:\n print(\"[WARNING] BaseAviary.render() is implemented as text-only, re-initialize the environment using Aviary(gui=True) to use PyBullet's graphical interface\")\n self.first_render_call = False\n print(\"\\n[INFO] BaseAviary.render() ——— it {:04d}\".format(self.step_counter),\n \"——— wall-clock time {:.1f}s,\".format(time.time()-self.RESET_TIME),\n \"simulation time {:.1f}s@{:d}Hz ({:.2f}x)\".format(self.step_counter*self.TIMESTEP, self.SIM_FREQ, (self.step_counter*self.TIMESTEP)/(time.time()-self.RESET_TIME)))\n for i in range (self.NUM_DRONES):\n print(\"[INFO] BaseAviary.render() ——— drone {:d}\".format(i),\n \"——— x {:+06.2f}, y {:+06.2f}, z {:+06.2f}\".format(self.pos[i, 0], self.pos[i, 1], self.pos[i, 2]),\n \"——— velocity {:+06.2f}, {:+06.2f}, {:+06.2f}\".format(self.vel[i, 0], self.vel[i, 1], self.vel[i, 2]),\n \"——— roll {:+06.2f}, pitch {:+06.2f}, yaw {:+06.2f}\".format(self.rpy[i, 0]*self.RAD2DEG, self.rpy[i, 1]*self.RAD2DEG, self.rpy[i, 2]*self.RAD2DEG),\n \"——— angular velocity {:+06.4f}, {:+06.4f}, {:+06.4f} ——— \".format(self.ang_v[i, 0], self.ang_v[i, 1], self.ang_v[i, 2]))\n \n ################################################################################\n\n def close(self):\n \"\"\"Terminates the environment.\n \"\"\"\n if self.RECORD and self.GUI:\n p.stopStateLogging(self.VIDEO_ID, physicsClientId=self.CLIENT)\n p.disconnect(physicsClientId=self.CLIENT)\n \n ################################################################################\n\n def getPyBulletClient(self):\n \"\"\"Returns the PyBullet Client Id.\n\n Returns\n -------\n int:\n The PyBullet Client Id.\n\n \"\"\"\n return self.CLIENT\n \n ################################################################################\n\n def getDroneIds(self):\n \"\"\"Return the Drone Ids.\n\n Returns\n -------\n ndarray:\n (NUM_DRONES,)-shaped array of ints containing the drones' ids.\n\n \"\"\"\n return self.DRONE_IDS\n \n ################################################################################\n\n def _housekeeping(self):\n \"\"\"Housekeeping function.\n\n Allocation and zero-ing of the variables and PyBullet's parameters/objects\n in the `reset()` function.\n\n \"\"\"\n #### Initialize/reset counters and zero-valued variables ###\n self.RESET_TIME = time.time()\n self.step_counter = 0\n self.first_render_call = True\n self.X_AX = -1*np.ones(self.NUM_DRONES)\n self.Y_AX = -1*np.ones(self.NUM_DRONES)\n self.Z_AX = -1*np.ones(self.NUM_DRONES)\n self.GUI_INPUT_TEXT = -1*np.ones(self.NUM_DRONES)\n self.USE_GUI_RPM=False\n self.last_input_switch = 0\n self.last_action = -1*np.ones((self.NUM_DRONES, 4))\n self.last_clipped_action = np.zeros((self.NUM_DRONES, 4))\n 
self.gui_input = np.zeros(4)\n #### Initialize the drones kinemaatic information ##########\n self.pos = np.zeros((self.NUM_DRONES, 3))\n self.quat = np.zeros((self.NUM_DRONES, 4))\n self.rpy = np.zeros((self.NUM_DRONES, 3))\n self.vel = np.zeros((self.NUM_DRONES, 3))\n self.ang_v = np.zeros((self.NUM_DRONES, 3))\n if self.PHYSICS == Physics.DYN:\n self.rpy_rates = np.zeros((self.NUM_DRONES, 3))\n #### Set PyBullet's parameters #############################\n p.setGravity(0, 0, -self.G, physicsClientId=self.CLIENT)\n p.setRealTimeSimulation(0, physicsClientId=self.CLIENT)\n p.setTimeStep(self.TIMESTEP, physicsClientId=self.CLIENT)\n p.setAdditionalSearchPath(pybullet_data.getDataPath(), physicsClientId=self.CLIENT)\n #### Load ground plane, drone and obstacles models #########\n self.PLANE_ID = p.loadURDF(\"plane.urdf\", physicsClientId=self.CLIENT)\n self.DRONE_IDS = np.array([p.loadURDF(os.path.dirname(os.path.abspath(__file__))+\"/../assets/\"+self.URDF,\n self.INIT_XYZS[i,:],\n p.getQuaternionFromEuler(self.INIT_RPYS[i,:]),\n flags = p.URDF_USE_INERTIA_FROM_FILE,\n physicsClientId=self.CLIENT\n ) for i in range(self.NUM_DRONES)])\n for i in range(self.NUM_DRONES):\n #### Show the frame of reference of the drone, note that ###\n #### It severly slows down the GUI #########################\n if self.GUI and self.USER_DEBUG:\n self._showDroneLocalAxes(i)\n #### Disable collisions between drones' and the ground plane\n #### E.g., to start a drone at [0,0,0] #####################\n # p.setCollisionFilterPair(bodyUniqueIdA=self.PLANE_ID, bodyUniqueIdB=self.DRONE_IDS[i], linkIndexA=-1, linkIndexB=-1, enableCollision=0, physicsClientId=self.CLIENT)\n if self.OBSTACLES:\n self._addObstacles()\n \n ################################################################################\n\n def _updateAndStoreKinematicInformation(self):\n \"\"\"Updates and stores the drones kinemaatic information.\n\n This method is meant to limit the number of calls to PyBullet in each step\n and improve performance (at the expense of memory).\n\n \"\"\"\n for i in range (self.NUM_DRONES):\n self.pos[i], self.quat[i] = p.getBasePositionAndOrientation(self.DRONE_IDS[i], physicsClientId=self.CLIENT)\n self.rpy[i] = p.getEulerFromQuaternion(self.quat[i])\n self.vel[i], self.ang_v[i] = p.getBaseVelocity(self.DRONE_IDS[i], physicsClientId=self.CLIENT)\n \n ################################################################################\n\n def _startVideoRecording(self):\n \"\"\"Starts the recording of a video output.\n\n The format of the video output is .mp4, if GUI is True, or .png, otherwise.\n The video is saved under folder `files/videos`.\n\n \"\"\"\n if self.RECORD and self.GUI:\n self.VIDEO_ID = p.startStateLogging(loggingType=p.STATE_LOGGING_VIDEO_MP4,\n fileName=os.path.dirname(os.path.abspath(__file__))+\"/../../files/videos/video-\"+datetime.now().strftime(\"%m.%d.%Y_%H.%M.%S\")+\".mp4\",\n physicsClientId=self.CLIENT\n )\n if self.RECORD and not self.GUI:\n self.FRAME_NUM = 0\n self.IMG_PATH = os.path.dirname(os.path.abspath(__file__))+\"/../../files/videos/video-\"+datetime.now().strftime(\"%m.%d.%Y_%H.%M.%S\")+\"/\"\n os.makedirs(os.path.dirname(self.IMG_PATH), exist_ok=True)\n \n ################################################################################\n\n def _getDroneStateVector(self,\n nth_drone\n ):\n \"\"\"Returns the state vector of the n-th drone.\n\n Parameters\n ----------\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n Returns\n -------\n 
ndarray \n (20,)-shaped array of floats containing the state vector of the n-th drone.\n Check the only line in this method and `_updateAndStoreKinematicInformation()`\n to understand its format.\n\n \"\"\"\n state = np.hstack([self.pos[nth_drone, :], self.quat[nth_drone, :], self.rpy[nth_drone, :],\n self.vel[nth_drone, :], self.ang_v[nth_drone, :], self.last_clipped_action[nth_drone, :]])\n return state.reshape(20,)\n\n ################################################################################\n\n def _getDroneImages(self,\n nth_drone,\n segmentation: bool=True\n ):\n \"\"\"Returns camera captures from the n-th drone POV.\n\n Parameters\n ----------\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n segmentation : bool, optional\n Whehter to compute the compute the segmentation mask.\n It affects performance.\n\n Returns\n -------\n ndarray \n (h, w, 4)-shaped array of uint8's containing the RBG(A) image captured from the n-th drone's POV.\n ndarray\n (h, w)-shaped array of uint8's containing the depth image captured from the n-th drone's POV.\n ndarray\n (h, w)-shaped array of uint8's containing the segmentation image captured from the n-th drone's POV.\n\n \"\"\"\n if self.IMG_RES is None:\n print(\"[ERROR] in BaseAviary._getDroneImages(), remember to set self.IMG_RES to np.array([width, height])\")\n exit()\n rot_mat = np.array(p.getMatrixFromQuaternion(self.quat[nth_drone, :])).reshape(3, 3)\n #### Set target point, camera view and projection matrices #\n target = np.dot(rot_mat,np.array([1000, 0, 0])) + np.array(self.pos[nth_drone, :])\n DRONE_CAM_VIEW = p.computeViewMatrix(cameraEyePosition=self.pos[nth_drone, :]+np.array([0, 0, self.L]),\n cameraTargetPosition=target,\n cameraUpVector=[0, 0, 1],\n physicsClientId=self.CLIENT\n )\n DRONE_CAM_PRO = p.computeProjectionMatrixFOV(fov=60.0,\n aspect=1.0,\n nearVal=self.L,\n farVal=1000.0\n )\n SEG_FLAG = p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX if segmentation else p.ER_NO_SEGMENTATION_MASK\n [w, h, rgb, dep, seg] = p.getCameraImage(width=self.IMG_RES[0],\n height=self.IMG_RES[1],\n shadow=1,\n viewMatrix=DRONE_CAM_VIEW,\n projectionMatrix=DRONE_CAM_PRO,\n flags=SEG_FLAG,\n physicsClientId=self.CLIENT\n )\n rgb = np.reshape(rgb, (h, w, 4))\n dep = np.reshape(dep, (h, w))\n seg = np.reshape(seg, (h, w))\n return rgb, dep, seg\n\n ################################################################################\n\n def _exportImage(self,\n img_type: ImageType,\n img_input,\n path: str,\n frame_num: int=0\n ):\n \"\"\"Returns camera captures from the n-th drone POV.\n\n Parameters\n ----------\n img_type : ImageType\n The image type: RGB(A), depth, segmentation, or B&W (from RGB).\n img_input : ndarray\n (h, w, 4)-shaped array of uint8's for RBG(A) or B&W images.\n (h, w)-shaped array of uint8's for depth or segmentation images.\n path : str\n Path where to save the output as PNG.\n fram_num: int, optional\n Frame number to append to the PNG's filename.\n\n \"\"\"\n if img_type == ImageType.RGB:\n (Image.fromarray(img_input.astype('uint8'), 'RGBA')).save(path+\"frame_\"+str(frame_num)+\".png\")\n elif img_type == ImageType.DEP:\n temp = ((img_input-np.min(img_input)) * 255 / (np.max(img_input)-np.min(img_input))).astype('uint8')\n elif img_type == ImageType.SEG:\n temp = ((img_input-np.min(img_input)) * 255 / (np.max(img_input)-np.min(img_input))).astype('uint8')\n elif img_type == ImageType.BW:\n temp = (np.sum(img_input[:, :, 0:2], axis=2) / 3).astype('uint8')\n else:\n print(\"[ERROR] 
in BaseAviary._exportImage(), unknown ImageType\")\n exit()\n if img_type != ImageType.RGB:\n (Image.fromarray(temp)).save(path+\"frame_\"+str(frame_num)+\".png\")\n\n ################################################################################\n\n def _getAdjacencyMatrix(self):\n \"\"\"Computes the adjacency matrix of a multi-drone system.\n\n Attribute NEIGHBOURHOOD_RADIUS is used to determine neighboring relationships.\n\n Returns\n -------\n ndarray\n (NUM_DRONES, NUM_DRONES)-shaped array of 0's and 1's representing the adjacency matrix \n of the system: adj_mat[i,j] == 1 if (i, j) are neighbors; == 0 otherwise.\n\n \"\"\"\n adjacency_mat = np.identity(self.NUM_DRONES)\n for i in range(self.NUM_DRONES-1):\n for j in range(self.NUM_DRONES-i-1):\n if np.linalg.norm(self.pos[i, :]-self.pos[j+i+1, :]) < self.NEIGHBOURHOOD_RADIUS:\n adjacency_mat[i, j+i+1] = adjacency_mat[j+i+1, i] = 1\n return adjacency_mat\n \n ################################################################################\n \n def _physics(self,\n rpm,\n nth_drone\n ):\n \"\"\"Base PyBullet physics implementation.\n\n Parameters\n ----------\n rpm : ndarray\n (4)-shaped array of ints containing the RPMs values of the 4 motors.\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n forces = np.array(rpm**2)*self.KF\n torques = np.array(rpm**2)*self.KM\n z_torque = (-torques[0] + torques[1] - torques[2] + torques[3])\n for i in range(4):\n p.applyExternalForce(self.DRONE_IDS[nth_drone],\n i,\n forceObj=[0, 0, forces[i]],\n posObj=[0, 0, 0],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n p.applyExternalTorque(self.DRONE_IDS[nth_drone],\n 4,\n torqueObj=[0, 0, z_torque],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n\n ################################################################################\n\n def _groundEffect(self,\n rpm,\n nth_drone\n ):\n \"\"\"PyBullet implementation of a ground effect model.\n\n Inspired by the analytical model used for comparison in (Shi et al., 2019).\n\n Parameters\n ----------\n rpm : ndarray\n (4)-shaped array of ints containing the RPMs values of the 4 motors.\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n #### Kin. 
info of all links (propellers and center of mass)\n link_states = np.array(p.getLinkStates(self.DRONE_IDS[nth_drone],\n linkIndices=[0, 1, 2, 3, 4],\n computeLinkVelocity=1,\n computeForwardKinematics=1,\n physicsClientId=self.CLIENT\n ))\n #### Simple, per-propeller ground effects ##################\n prop_heights = np.array([link_states[0, 0][2], link_states[1, 0][2], link_states[2, 0][2], link_states[3, 0][2]])\n prop_heights = np.clip(prop_heights, self.GND_EFF_H_CLIP, np.inf)\n gnd_effects = np.array(rpm**2) * self.KF * self.GND_EFF_COEFF * (self.PROP_RADIUS/(4 * prop_heights))**2\n if np.abs(self.rpy[nth_drone,0]) < np.pi/2 and np.abs(self.rpy[nth_drone,1]) < np.pi/2:\n for i in range(4):\n p.applyExternalForce(self.DRONE_IDS[nth_drone],\n i,\n forceObj=[0, 0, gnd_effects[i]],\n posObj=[0, 0, 0],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n\n ################################################################################\n\n def _drag(self,\n rpm,\n nth_drone\n ):\n \"\"\"PyBullet implementation of a drag model.\n\n Based on the the system identification in (Forster, 2015).\n\n Parameters\n ----------\n rpm : ndarray\n (4)-shaped array of ints containing the RPMs values of the 4 motors.\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n #### Rotation matrix of the base ###########################\n base_rot = np.array(p.getMatrixFromQuaternion(self.quat[nth_drone, :])).reshape(3, 3)\n #### Simple draft model applied to the base/center of mass #\n drag_factors = -1 * self.DRAG_COEFF * np.sum(np.array(2*np.pi*rpm/60))\n drag = np.dot(base_rot, drag_factors*np.array(self.vel[nth_drone, :]))\n p.applyExternalForce(self.DRONE_IDS[nth_drone],\n 4,\n forceObj=drag,\n posObj=[0, 0, 0],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n \n ################################################################################\n\n def _downwash(self,\n nth_drone\n ):\n \"\"\"PyBullet implementation of a ground effect model.\n\n Based on experiments conducted at the Dynamic Systems Lab by SiQi Zhou.\n\n Parameters\n ----------\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n for i in range(self.NUM_DRONES):\n delta_z = self.pos[i, 2] - self.pos[nth_drone, 2]\n delta_xy = np.linalg.norm(np.array(self.pos[i, 0:2]) - np.array(self.pos[nth_drone, 0:2]))\n if delta_z > 0 and delta_xy < 10: # Ignore drones more than 10 meters away\n alpha = self.DW_COEFF_1 * (self.PROP_RADIUS/(4*delta_z))**2\n beta = self.DW_COEFF_2 * delta_z + self.DW_COEFF_3\n downwash = [0, 0, -alpha * np.exp(-.5*(delta_xy/beta)**2)]\n p.applyExternalForce(self.DRONE_IDS[nth_drone],\n 4,\n forceObj=downwash,\n posObj=[0, 0, 0],\n flags=p.LINK_FRAME,\n physicsClientId=self.CLIENT\n )\n\n ################################################################################\n\n def _dynamics(self,\n rpm,\n nth_drone\n ):\n \"\"\"Explicit dynamics implementation.\n\n Based on code written at the Dynamic Systems Lab by James Xu.\n\n Parameters\n ----------\n rpm : ndarray\n (4)-shaped array of ints containing the RPMs values of the 4 motors.\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n #### Current state #########################################\n pos = self.pos[nth_drone,:]\n quat = self.quat[nth_drone,:]\n rpy = self.rpy[nth_drone,:]\n vel = self.vel[nth_drone,:]\n rpy_rates = self.rpy_rates[nth_drone,:]\n rotation = 
np.array(p.getMatrixFromQuaternion(quat)).reshape(3, 3)\n #### Compute forces and torques ############################\n forces = np.array(rpm**2) * self.KF\n thrust = np.array([0, 0, np.sum(forces)])\n thrust_world_frame = np.dot(rotation, thrust)\n force_world_frame = thrust_world_frame - np.array([0, 0, self.GRAVITY])\n z_torques = np.array(rpm**2)*self.KM\n z_torque = (-z_torques[0] + z_torques[1] - z_torques[2] + z_torques[3])\n if self.DRONE_MODEL==DroneModel.CF2X:\n x_torque = (forces[0] + forces[1] - forces[2] - forces[3]) * (self.L/np.sqrt(2))\n y_torque = (- forces[0] + forces[1] + forces[2] - forces[3]) * (self.L/np.sqrt(2))\n elif self.DRONE_MODEL==DroneModel.CF2P or self.DRONE_MODEL==DroneModel.HB:\n x_torque = (forces[1] - forces[3]) * self.L\n y_torque = (-forces[0] + forces[2]) * self.L\n torques = np.array([x_torque, y_torque, z_torque])\n torques = torques - np.cross(rpy_rates, np.dot(self.J, rpy_rates))\n rpy_rates_deriv = np.dot(self.J_INV, torques)\n no_pybullet_dyn_accs = force_world_frame / self.M\n #### Update state ##########################################\n vel = vel + self.TIMESTEP * no_pybullet_dyn_accs\n rpy_rates = rpy_rates + self.TIMESTEP * rpy_rates_deriv\n pos = pos + self.TIMESTEP * vel\n rpy = rpy + self.TIMESTEP * rpy_rates\n #### Set PyBullet's state ##################################\n p.resetBasePositionAndOrientation(self.DRONE_IDS[nth_drone],\n pos,\n p.getQuaternionFromEuler(rpy),\n physicsClientId=self.CLIENT\n )\n #### Note: the base's velocity only stored and not used ####\n p.resetBaseVelocity(self.DRONE_IDS[nth_drone],\n vel,\n [-1, -1, -1], # ang_vel not computed by DYN\n physicsClientId=self.CLIENT\n )\n #### Store the roll, pitch, yaw rates for the next step ####\n self.rpy_rates[nth_drone,:] = rpy_rates\n \n ################################################################################\n\n def _normalizedActionToRPM(self,\n action\n ):\n \"\"\"De-normalizes the [-1, 1] range to the [0, MAX_RPM] range.\n\n Parameters\n ----------\n action : ndarray\n (4)-shaped array of ints containing an input in the [-1, 1] range.\n\n Returns\n -------\n ndarray\n (4)-shaped array of ints containing RPMs for the 4 motors in the [0, MAX_RPM] range.\n\n \"\"\"\n if np.any(np.abs(action)) > 1:\n print(\"\\n[ERROR] it\", self.step_counter, \"in BaseAviary._normalizedActionToRPM(), out-of-bound action\")\n return np.where(action <= 0, (action+1)*self.HOVER_RPM, action*self.MAX_RPM) # Non-linear mapping: -1 -> 0, 0 -> HOVER_RPM, 1 -> MAX_RPM\n \n ################################################################################\n\n def _saveLastAction(self,\n action\n ):\n \"\"\"Stores the most recent action into attribute `self.last_action`.\n\n The last action can be used to compute aerodynamic effects.\n The method disambiguates between array and dict inputs \n (for single or multi-agent aviaries, respectively).\n\n Parameters\n ----------\n action : ndarray | dict\n (4)-shaped array of ints (or dictionary of arrays) containing the current RPMs input.\n\n \"\"\"\n if isinstance(action, collections.abc.Mapping):\n for k, v in action.items(): \n res_v = np.resize(v, (1, 4)) # Resize, possibly with repetition, to cope with different action spaces in RL subclasses\n self.last_action[int(k), :] = res_v\n else: \n res_action = np.resize(action, (1, 4)) # Resize, possibly with repetition, to cope with different action spaces in RL subclasses\n self.last_action = np.reshape(res_action, (self.NUM_DRONES, 4))\n \n 
################################################################################\n\n def _showDroneLocalAxes(self,\n nth_drone\n ):\n \"\"\"Draws the local frame of the n-th drone in PyBullet's GUI.\n\n Parameters\n ----------\n nth_drone : int\n The ordinal number/position of the desired drone in list self.DRONE_IDS.\n\n \"\"\"\n if self.GUI:\n AXIS_LENGTH = 2*self.L\n self.X_AX[nth_drone] = p.addUserDebugLine(lineFromXYZ=[0, 0, 0],\n lineToXYZ=[AXIS_LENGTH, 0, 0],\n lineColorRGB=[1, 0, 0],\n parentObjectUniqueId=self.DRONE_IDS[nth_drone],\n parentLinkIndex=-1,\n replaceItemUniqueId=int(self.X_AX[nth_drone]),\n physicsClientId=self.CLIENT\n )\n self.Y_AX[nth_drone] = p.addUserDebugLine(lineFromXYZ=[0, 0, 0],\n lineToXYZ=[0, AXIS_LENGTH, 0],\n lineColorRGB=[0, 1, 0],\n parentObjectUniqueId=self.DRONE_IDS[nth_drone],\n parentLinkIndex=-1,\n replaceItemUniqueId=int(self.Y_AX[nth_drone]),\n physicsClientId=self.CLIENT\n )\n self.Z_AX[nth_drone] = p.addUserDebugLine(lineFromXYZ=[0, 0, 0],\n lineToXYZ=[0, 0, AXIS_LENGTH],\n lineColorRGB=[0, 0, 1],\n parentObjectUniqueId=self.DRONE_IDS[nth_drone],\n parentLinkIndex=-1,\n replaceItemUniqueId=int(self.Z_AX[nth_drone]),\n physicsClientId=self.CLIENT\n )\n \n ################################################################################\n\n def _addObstacles(self):\n \"\"\"Add obstacles to the environment.\n\n These obstacles are loaded from standard URDF files included in Bullet.\n\n \"\"\"\n p.loadURDF(\"samurai.urdf\",\n physicsClientId=self.CLIENT\n )\n p.loadURDF(\"duck_vhacd.urdf\",\n [-.5, -.5, .05],\n p.getQuaternionFromEuler([0, 0, 0]),\n physicsClientId=self.CLIENT\n )\n p.loadURDF(\"cube_no_rotation.urdf\",\n [-.5, -2.5, .5],\n p.getQuaternionFromEuler([0, 0, 0]),\n physicsClientId=self.CLIENT\n )\n p.loadURDF(\"sphere2.urdf\",\n [0, 2, .5],\n p.getQuaternionFromEuler([0,0,0]),\n physicsClientId=self.CLIENT\n )\n \n ################################################################################\n \n def _parseURDFParameters(self):\n \"\"\"Loads parameters from an URDF file.\n\n This method is nothing more than a custom XML parser for the .urdf\n files in folder `assets/`.\n\n \"\"\"\n URDF_TREE = etxml.parse(os.path.dirname(os.path.abspath(__file__))+\"/../assets/\"+self.URDF).getroot()\n M = float(URDF_TREE[1][0][1].attrib['value'])\n L = float(URDF_TREE[0].attrib['arm'])\n THRUST2WEIGHT_RATIO = float(URDF_TREE[0].attrib['thrust2weight'])\n IXX = float(URDF_TREE[1][0][2].attrib['ixx'])\n IYY = float(URDF_TREE[1][0][2].attrib['iyy'])\n IZZ = float(URDF_TREE[1][0][2].attrib['izz'])\n J = np.diag([IXX, IYY, IZZ])\n J_INV = np.linalg.inv(J)\n KF = float(URDF_TREE[0].attrib['kf'])\n KM = float(URDF_TREE[0].attrib['km'])\n COLLISION_H = float(URDF_TREE[1][2][1][0].attrib['length'])\n COLLISION_R = float(URDF_TREE[1][2][1][0].attrib['radius'])\n COLLISION_SHAPE_OFFSETS = [float(s) for s in URDF_TREE[1][2][0].attrib['xyz'].split(' ')]\n COLLISION_Z_OFFSET = COLLISION_SHAPE_OFFSETS[2]\n MAX_SPEED_KMH = float(URDF_TREE[0].attrib['max_speed_kmh'])\n GND_EFF_COEFF = float(URDF_TREE[0].attrib['gnd_eff_coeff'])\n PROP_RADIUS = float(URDF_TREE[0].attrib['prop_radius'])\n DRAG_COEFF_XY = float(URDF_TREE[0].attrib['drag_coeff_xy'])\n DRAG_COEFF_Z = float(URDF_TREE[0].attrib['drag_coeff_z'])\n DRAG_COEFF = np.array([DRAG_COEFF_XY, DRAG_COEFF_XY, DRAG_COEFF_Z])\n DW_COEFF_1 = float(URDF_TREE[0].attrib['dw_coeff_1'])\n DW_COEFF_2 = float(URDF_TREE[0].attrib['dw_coeff_2'])\n DW_COEFF_3 = float(URDF_TREE[0].attrib['dw_coeff_3'])\n return M, L, 
THRUST2WEIGHT_RATIO, J, J_INV, KF, KM, COLLISION_H, COLLISION_R, COLLISION_Z_OFFSET, MAX_SPEED_KMH, \\\n GND_EFF_COEFF, PROP_RADIUS, DRAG_COEFF, DW_COEFF_1, DW_COEFF_2, DW_COEFF_3\n \n ################################################################################\n \n def _actionSpace(self):\n \"\"\"Returns the action space of the environment.\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n \n ################################################################################\n\n def _observationSpace(self):\n \"\"\"Returns the observation space of the environment.\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n \n ################################################################################\n \n def _computeObs(self):\n \"\"\"Returns the current observation of the environment.\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n \n ################################################################################\n\n def _preprocessAction(self,\n action\n ):\n \"\"\"Pre-processes the action passed to `.step()` into motors' RPMs.\n\n Must be implemented in a subclass.\n\n Parameters\n ----------\n action : ndarray | dict[..]\n The input action for one or more drones, to be translated into RPMs.\n\n \"\"\"\n raise NotImplementedError\n\n ################################################################################\n\n def _computeReward(self):\n \"\"\"Computes the current reward value(s).\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n\n ################################################################################\n\n def _computeDone(self):\n \"\"\"Computes the current done value(s).\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n\n ################################################################################\n\n def _computeInfo(self):\n \"\"\"Computes the current info dict(s).\n\n Must be implemented in a subclass.\n\n \"\"\"\n raise NotImplementedError\n" ]
[ [ "numpy.dot", "numpy.tile", "numpy.exp", "numpy.min", "numpy.where", "numpy.resize", "numpy.max", "numpy.linalg.norm", "numpy.sqrt", "numpy.linalg.inv", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.identity", "numpy.clip", "numpy.hstack", "numpy.sum", "numpy.ones", "numpy.abs", "numpy.diag" ] ]
aroig/nnutil
[ "88df41ee89f592a28c1661ee8837dd8e8ca42cf3" ]
[ "nnutil/visual/bars.py" ]
[ "import numpy as np\nimport math\n\n_vbars = \" ▁▂▃▄▅▆▇█\"\n\ndef bar_graph(data):\n if len(data) > 64:\n data = np.interp(np.linspace(0, len(data), 64),\n np.arange(0, len(data)),\n np.array(data))\n\n M = max(data)\n def _bar(alpha):\n if math.isnan(alpha):\n return 'N'\n else:\n n = int((len(_vbars) - 1) * max(0.0, min(1.0, alpha)))\n return _vbars[n]\n\n if M > 0:\n return ''.join([_bar(x/M) for x in data])\n else:\n return len(data) * ' '\n" ]
[ [ "numpy.array" ] ]
ValterFallenius/metnet
[ "7cde48a7b5fc0b69a8ce9083f934949362620fd5" ]
[ "metnet/layers/ConvLSTM.py" ]
[ "\"\"\"Originally adapted from https://github.com/aserdega/convlstmgru, MIT License Andriy Serdega\"\"\"\nfrom typing import Any, List, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\n\nclass ConvLSTMCell(nn.Module):\n \"\"\"ConvLSTM Cell\"\"\"\n\n def __init__(\n self,\n input_dim: int,\n hidden_dim: int,\n kernel_size: int,\n bias=True,\n activation=F.tanh,\n batchnorm=False,\n ):\n \"\"\"\n ConLSTM Cell\n\n Args:\n input_dim: Number of input channels\n hidden_dim: Number of hidden channels\n kernel_size: Kernel size\n bias: Whether to add bias\n activation: Activation to use\n batchnorm: Whether to use batch norm\n \"\"\"\n super(ConvLSTMCell, self).__init__()\n\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n\n self.kernel_size = kernel_size\n self.padding = kernel_size // 2, kernel_size // 2\n self.bias = bias\n self.activation = activation\n self.batchnorm = batchnorm\n\n self.conv = nn.Conv2d(\n in_channels=self.input_dim + self.hidden_dim,\n out_channels=4 * self.hidden_dim,\n kernel_size=self.kernel_size,\n padding=self.padding,\n bias=self.bias,\n )\n\n self.reset_parameters()\n\n def forward(self, x: torch.Tensor, prev_state: list) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Compute forward pass\n\n Args:\n x: Input tensor of [Batch, Channel, Height, Width]\n prev_state: Previous hidden state\n\n Returns:\n The new hidden state and output\n \"\"\"\n h_prev, c_prev = prev_state\n\n combined = torch.cat((x, h_prev), dim=1) # concatenate along channel axis\n combined_conv = self.conv(combined)\n\n cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)\n\n i = F.sigmoid(cc_i)\n f = F.sigmoid(cc_f)\n\n g = self.activation(cc_g)\n c_cur = f * c_prev + i * g\n\n o = F.sigmoid(cc_o)\n\n h_cur = o * self.activation(c_cur)\n\n return h_cur, c_cur\n\n def init_hidden(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Initializes the hidden state\n Args:\n x: Input tensor to initialize for\n\n Returns:\n Tuple containing the hidden states\n \"\"\"\n state = (\n torch.zeros(x.size()[0], self.hidden_dim, x.size()[3], x.size()[4]),\n torch.zeros(x.size()[0], self.hidden_dim, x.size()[3], x.size()[4]),\n )\n state = (state[0].type_as(x), state[1].type_as(x))\n return state\n\n def reset_parameters(self) -> None:\n \"\"\"Resets parameters\"\"\"\n nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.calculate_gain(\"tanh\"))\n self.conv.bias.data.zero_()\n\n if self.batchnorm:\n self.bn1.reset_parameters()\n self.bn2.reset_parameters()\n\n\nclass ConvLSTM(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dim: int,\n kernel_size: int,\n num_layers: int,\n bias=True,\n activation=F.tanh,\n batchnorm=False,\n ):\n \"\"\"\n ConvLSTM module\n\n Args:\n input_dim: Input dimension size\n hidden_dim: Hidden dimension size\n kernel_size: Kernel size\n num_layers: Number of layers\n bias: Whether to add bias\n activation: Activation function\n batchnorm: Whether to use batch norm\n \"\"\"\n super(ConvLSTM, self).__init__()\n\n # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers\n kernel_size = self._extend_for_multilayer(kernel_size, num_layers)\n hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)\n activation = self._extend_for_multilayer(activation, num_layers)\n\n if not len(kernel_size) == len(hidden_dim) == len(activation) == num_layers:\n raise ValueError(\"Inconsistent list length.\")\n\n self.input_dim = 
input_dim\n self.hidden_dim = hidden_dim\n self.kernel_size = kernel_size\n self.num_layers = num_layers\n self.batch_first = True\n self.bias = bias\n\n cell_list = []\n for i in range(0, self.num_layers):\n cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]\n\n cell_list.append(\n ConvLSTMCell(\n input_dim=cur_input_dim,\n hidden_dim=self.hidden_dim[i],\n kernel_size=self.kernel_size[i],\n bias=self.bias,\n activation=activation[i],\n batchnorm=batchnorm,\n )\n )\n\n self.cell_list = nn.ModuleList(cell_list)\n\n self.reset_parameters()\n\n def forward(\n self, x: torch.Tensor, hidden_state: Optional[list] = None\n ) -> tuple[Tensor, list[tuple[Any, Any]]]:\n \"\"\"\n Computes the output of the ConvLSTM\n\n Args:\n x: Input Tensor of shape [Batch, Time, Channel, Width, Height]\n hidden_state: List of hidden states to use, if none passed, it will be generated\n\n Returns:\n The layer output and list of last states\n \"\"\"\n cur_layer_input = torch.unbind(x, dim=int(self.batch_first))\n\n if not hidden_state:\n hidden_state = self.get_init_states(x)\n\n seq_len = len(cur_layer_input)\n\n last_state_list = []\n\n for layer_idx in range(self.num_layers):\n h, c = hidden_state[layer_idx]\n output_inner = []\n for t in range(seq_len):\n h, c = self.cell_list[layer_idx](x=cur_layer_input[t], prev_state=[h, c])\n output_inner.append(h)\n\n cur_layer_input = output_inner\n last_state_list.append((h, c))\n\n layer_output = torch.stack(output_inner, dim=int(self.batch_first))\n\n return layer_output, last_state_list\n\n def reset_parameters(self) -> None:\n \"\"\"\n Reset parameters\n \"\"\"\n for c in self.cell_list:\n c.reset_parameters()\n\n def get_init_states(self, x: torch.Tensor) -> List[torch.Tensor]:\n \"\"\"\n Constructs the initial hidden states\n\n Args:\n x: Tensor to use for constructing state\n\n Returns:\n The initial hidden states for all the layers in the network\n \"\"\"\n init_states = []\n for i in range(self.num_layers):\n init_states.append(self.cell_list[i].init_hidden(x))\n return init_states\n\n @staticmethod\n def _extend_for_multilayer(param, num_layers):\n \"\"\"\n Extends a parameter for multiple layers\n\n Args:\n param: Parameter to copy\n num_layers: Number of layers\n\n Returns:\n The extended parameter\n \"\"\"\n if not isinstance(param, list):\n param = [param] * num_layers\n return param\n" ]
[ [ "torch.nn.functional.sigmoid", "torch.cat", "torch.nn.ModuleList", "torch.split", "torch.nn.Conv2d", "torch.nn.init.calculate_gain" ] ]
sundogu/ML-Bayes-Rule-Classification
[ "ac476e21130c86d082783ab83b8badd368c87291" ]
[ "bayes_rule_classifier.py" ]
[ "import numpy as np\r\nimport scipy.stats as stats\r\n\r\n\r\nclass Classifier:\r\n # Class Variables\r\n _n_class = _p_m_s = None\r\n\r\n # Constructor\r\n def __init__(self, col_1, col_2, n_class):\r\n self._init_var(col_1, col_2, n_class)\r\n\r\n # Methods\r\n def _init_var(self, col_1, col_2, n_class):\r\n self._n_class = n_class\r\n\r\n assert len(col_1) == len(col_2)\r\n hmap = self._sort_cols(col_1, col_2)\r\n\r\n assert self._n_class == len(list(hmap))\r\n self._load_prior(col_2)\r\n self._load_mean_std(hmap)\r\n\r\n def _load_prior(self, col_2):\r\n self._p_m_s = {}\r\n for i in range(self._n_class):\r\n self._p_m_s[i] = {\"prior\": col_2.count(i) / float(len(col_2))}\r\n\r\n return\r\n\r\n def _sort_cols(self, col_1, col_2):\r\n hmap = {}\r\n\r\n for i in range(len(col_1)):\r\n if col_2[i] not in hmap:\r\n hmap[col_2[i]] = []\r\n\r\n hmap[col_2[i]].append(col_1[i])\r\n\r\n return hmap\r\n\r\n def _load_mean_std(self, hmap):\r\n for k in list(hmap):\r\n self._p_m_s[k][\"mean\"] = np.mean(hmap[k])\r\n self._p_m_s[k][\"std\"] = np.std(hmap[k], ddof=1)\r\n\r\n return\r\n\r\n def classify(self, test_x):\r\n def likelihood_x_prior(x, class_n):\r\n pms = self._p_m_s[class_n]\r\n return stats.norm(pms[\"mean\"], pms[\"std\"]).pdf(x) * pms[\"prior\"]\r\n\r\n evidence = 0\r\n\r\n for k in list(self._p_m_s):\r\n evidence += likelihood_x_prior(test_x, k)\r\n\r\n hmap = {}\r\n\r\n for k in list(self._p_m_s):\r\n if evidence != 0:\r\n post = likelihood_x_prior(test_x, k) / evidence\r\n else:\r\n post = 0\r\n\r\n if post not in hmap:\r\n hmap[post] = []\r\n\r\n hmap[post].append(k)\r\n\r\n class_list = hmap[np.max(list(hmap))]\r\n return class_list[np.random.randint(0, len(class_list))]\r\n" ]
[ [ "numpy.std", "scipy.stats.norm", "numpy.mean" ] ]
wangjinjia1/dcase2019task5_YSU
[ "c307cd118bb27cb913850f80d14f327399145ee9" ]
[ "train.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 3 08:08:11 2019\n\n@author: barry\n\"\"\"\nimport os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], '../utils'))\nimport numpy as np\nimport argparse\nimport h5py\nimport math\nimport time\nimport logging\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom utilities import (create_folder, get_filename, create_logging, \n load_scalar, get_labels)\nfrom data_generator import DataGenerator\nfrom models import TFSANN\nfrom losses import binary_cross_entropy\nfrom evaluate import Evaluator, StatisticsContainer\nfrom pytorch_utils import move_data_to_gpu\nimport config\n\n\n\ndef train(args):\n '''Training. Model will be saved after several iterations. \n \n Args: \n dataset_dir: string, directory of dataset\n workspace: string, directory of workspace\n taxonomy_level: 'fine' | 'coarse'\n model_type: string, e.g. 'Cnn_9layers_MaxPooling'\n holdout_fold: '1' | 'None', where '1' indicates using validation and \n 'None' indicates using full data for training\n batch_size: int\n cuda: bool\n mini_data: bool, set True for debugging on a small part of data\n '''\n\n # Arugments & parameters\n dataset_dir = args.dataset_dir\n workspace = args.workspace\n taxonomy_level = args.taxonomy_level\n model_type = args.model_type\n holdout_fold = args.holdout_fold\n batch_size = args.batch_size\n cuda = args.cuda and torch.cuda.is_available()\n mini_data = args.mini_data\n filename = args.filename\n \n seq_len = 640\n mel_bins = config.mel_bins\n frames_per_second = config.frames_per_second\n max_iteration = 10 # Number of mini-batches to evaluate on training data\n reduce_lr = True\n \n labels = get_labels(taxonomy_level)\n classes_num = len(labels)\n \n # Paths\n if mini_data:\n prefix = 'minidata_'\n else:\n prefix = ''\n \n train_hdf5_path = os.path.join(workspace, 'features', \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'train.h5')\n \n validate_hdf5_path = os.path.join(workspace, 'features', \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'validate.h5')\n \n scalar_path = os.path.join(workspace, 'scalars', \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'train.h5')\n \n checkpoints_dir = os.path.join(workspace, 'checkpoints', filename, \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'taxonomy_level={}'.format(taxonomy_level), \n 'holdout_fold={}'.format(holdout_fold), model_type)\n create_folder(checkpoints_dir)\n \n _temp_submission_path = os.path.join(workspace, '_temp_submissions', filename, \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'taxonomy_level={}'.format(taxonomy_level), \n 'holdout_fold={}'.format(holdout_fold), model_type, '_submission.csv')\n create_folder(os.path.dirname(_temp_submission_path))\n \n validate_statistics_path = os.path.join(workspace, 'statistics', filename, \n '{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'taxonomy_level={}'.format(taxonomy_level), \n 'holdout_fold={}'.format(holdout_fold), model_type, \n 'validate_statistics.pickle')\n create_folder(os.path.dirname(validate_statistics_path))\n \n annotation_path = os.path.join(dataset_dir, 'annotations.csv')\n \n yaml_path = os.path.join(dataset_dir, 'dcase-ust-taxonomy.yaml')\n \n logs_dir = os.path.join(workspace, 'logs', filename, args.mode, \n 
'{}logmel_{}frames_{}melbins'.format(prefix, frames_per_second, mel_bins), \n 'taxonomy_level={}'.format(taxonomy_level), \n 'holdout_fold={}'.format(holdout_fold), model_type)\n create_logging(logs_dir, 'w')\n logging.info(args)\n\n if cuda:\n logging.info('Using GPU.')\n else:\n logging.info('Using CPU. Set --cuda flag to use GPU.')\n\n # Load scalar\n scalar = load_scalar(scalar_path)\n \n # Model\n Model = eval(model_type)\n model = Model(classes_num, seq_len, mel_bins, cuda)\n \n if cuda:\n model.cuda()\n \n # Optimizer\n optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999),\n eps=1e-08, weight_decay=0., amsgrad=True)\n print('cliqueNet parameters:', sum(param.numel() for param in model.parameters()))\n # Data generator\n data_generator = DataGenerator(\n train_hdf5_path=train_hdf5_path, \n validate_hdf5_path=validate_hdf5_path, \n holdout_fold=holdout_fold, \n scalar=scalar, \n batch_size=batch_size)\n \n # Evaluator\n evaluator = Evaluator(\n model=model, \n data_generator=data_generator, \n taxonomy_level=taxonomy_level, \n cuda=cuda, \n verbose=False)\n \n # Statistics\n validate_statistics_container = StatisticsContainer(validate_statistics_path)\n \n train_bgn_time = time.time()\n iteration = 0\n \n # Train on mini batches\n for batch_data_dict in data_generator.generate_train():\n \n # Evaluate\n if iteration % 200 == 0:\n logging.info('------------------------------------')\n logging.info('Iteration: {}, {} level statistics:'.format(\n iteration, taxonomy_level))\n\n train_fin_time = time.time()\n\n # Evaluate on training data\n if mini_data:\n raise Exception('`mini_data` flag must be set to False to use '\n 'the official evaluation tool!')\n \n train_statistics = evaluator.evaluate(\n data_type='train', \n max_iteration=None)\n \n # Evaluate on validation data\n if holdout_fold != 'none':\n validate_statistics = evaluator.evaluate(\n data_type='validate', \n submission_path=_temp_submission_path, \n annotation_path=annotation_path, \n yaml_path=yaml_path, \n max_iteration=None)\n \n validate_statistics_container.append_and_dump(\n iteration, validate_statistics)\n\n train_time = train_fin_time - train_bgn_time\n validate_time = time.time() - train_fin_time\n\n logging.info(\n 'Train time: {:.3f} s, validate time: {:.3f} s'\n ''.format(train_time, validate_time))\n\n train_bgn_time = time.time()\n\n # Save model\n if iteration % 1000 == 0 and iteration > 0:\n checkpoint = {\n 'iteration': iteration, \n 'model': model.state_dict(), \n 'optimizer': optimizer.state_dict()}\n\n checkpoint_path = os.path.join(\n checkpoints_dir, '{}_iterations.pth'.format(iteration))\n \n torch.save(checkpoint, checkpoint_path)\n logging.info('Model saved to {}'.format(checkpoint_path))\n \n # Reduce learning rate\n if reduce_lr and iteration % 200 == 0 and iteration > 0:\n for param_group in optimizer.param_groups:\n param_group['lr'] *= 0.9\n \n # Move data to GPU\n for key in batch_data_dict.keys():\n if key in ['feature', 'fine_target', 'coarse_target']:\n batch_data_dict[key] = move_data_to_gpu(\n batch_data_dict[key], cuda)\n \n # Train\n model.train()\n batch_output = model(batch_data_dict['feature'])\n \n # loss\n batch_target = batch_data_dict['{}_target'.format(taxonomy_level)]\n loss = binary_cross_entropy(batch_output, batch_target)\n\n # Backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Stop learning\n if iteration == 3000:\n break\n \n iteration += 1\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Example of 
parser. ')\n subparsers = parser.add_subparsers(dest='mode')\n\n parser_train = subparsers.add_parser('train')\n parser_train.add_argument('--dataset_dir', type=str, required=True, help='Directory of dataset.')\n parser_train.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')\n parser_train.add_argument('--taxonomy_level', type=str, choices=['fine', 'coarse'], required=True)\n parser_train.add_argument('--model_type', type=str, required=True, help='E.g., TFSANN.')\n parser_train.add_argument('--holdout_fold', type=str, choices=['1', 'none'], required=True)\n parser_train.add_argument('--batch_size', type=int, required=True)\n parser_train.add_argument('--cuda', action='store_true', default=True)\n parser_train.add_argument('--mini_data', action='store_true', default=False, help='Set True for debugging on a small part of data.')\n \n args = parser.parse_args()\n args.filename = get_filename(__file__)\n\n if args.mode == 'train':\n train(args)\n \n else:\n raise Exception('Error argument!')" ]
[ [ "torch.save", "torch.cuda.is_available" ] ]
ahmednader10/Machine_Learning
[ "fab0c7cd773b5e001b56c5349550085e34661e4d" ]
[ "Tensorflow/MNIST/Chapter1.py" ]
[ "import tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\nX = tf.placeholder(tf.float32, [None, 28, 28, 1])\nW = tf.Variable(tf.zeros([784,10]))\nb = tf.Variable(tf.zeros([10]))\n\nX = tf.reshape(X, [-1, 784])\n#model\nY = tf.nn.softmax(tf.matmul(X, W) + b)\n\n#placeholder for correct answers\nY_ = tf.placeholder(tf.float32, [None, 10])\n\n#loss function\ncross_entropy = -tf.reduce_sum(Y_ * tf.log(Y))\n\n# % of correct answers in batch\nis_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_, 1))\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n\noptimizer = tf.train.GradientDescentOptimizer(0.003)\ntrain_step = optimizer.minimize(cross_entropy)\n\ninit = tf.initialize_all_variables()\nsess = tf.Session()\nsess.run(init)\n\nfor i in range(10000):\n #load batch images and correct images\n batch_X, batch_Y = mnist.train.next_batch(100)\n\n train_data = {X: batch_X, Y_: batch_Y}\n #train\n sess.run(train_step, feed_dict = {X: batch_X, Y_: batch_Y})\n\n #print in case of success\n a,c = sess.run([accuracy, cross_entropy], feed_dict={X: batch_X, Y_: batch_Y})\n\n #success on test data?\n test_data = {X:mnist.test.images, Y_:mnist.test.labels}\n a,c = sess.run([accuracy, cross_entropy], feed_dict = {X:mnist.test.images, Y_:mnist.test.labels})\n\nprint(\"accuracy:\" + str(a) + \" loss: \" + str(c))\n" ]
[ [ "tensorflow.zeros", "tensorflow.initialize_all_variables", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.Session", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.log", "tensorflow.placeholder", "tensorflow.cast", "tensorflow.train.GradientDescentOptimizer" ] ]
anonymousprojs/ISSTA2022-study
[ "94cef7fc4c098c03bb08ff8865d0c1d9a5de86b2", "94cef7fc4c098c03bb08ff8865d0c1d9a5de86b2" ]
[ "coverage/rq3/rq3_script.py", "coverage/tools/surprise_adequacy/sa.py" ]
[ "import argparse\r\nimport configparser\r\nimport os\r\nimport numpy as np\r\nfrom datetime import datetime, date\r\n\r\nfrom pandas import DataFrame\r\n\r\nfrom coverage import root_dir\r\nimport coverage.tools.dataloader as dataloader\r\nfrom coverage.tools import common_utils\r\nimport coverage.tools.model_utils as model_utils\r\nfrom coverage.tools.coverage_utils import execute_sampling, SurpriseCoverage\r\n\r\n\r\ndef get_aggregated_indices(labels, select_idx):\r\n sampled_indices_list = []\r\n for class_id in select_idx:\r\n sampled_indices = np.nonzero(labels == class_id)[0]\r\n sampled_indices_list.append(sampled_indices)\r\n aggregated_indices = np.concatenate(sampled_indices_list)\r\n return aggregated_indices\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--sample_capacity\", help=\"number of images\", type=int, default=800)\r\n parser.add_argument(\"--repeat_times\", help=\"number of selected classes\", type=int, default=2)\r\n parser.add_argument(\"--dataset_network\", help=\"selected class id\", type=str, default=\"cifar100_resnet32\")\r\n parser.add_argument(\"--attack\", help=\"adversarial attack\", type=str, default=\"cw\")\r\n parser.add_argument(\"--exp_date\", help=\"data_of_exp\", type=str,)\r\n parser.add_argument(\"--split_id\", help=\"id number of select split\", type=int, default=1)\r\n console_args = parser.parse_args()\r\n print(console_args)\r\n\r\n dataset_network = console_args.dataset_network\r\n\r\n exp_cfg = configparser.ConfigParser()\r\n coverage_parameters = {\"n_bucket\": 1000}\r\n exp_cfg.read(f\"{root_dir}/config/exp.conf\")\r\n total_group_nums = exp_cfg['parameters'].getint(\"group_nums\")\r\n coverage_parameters[\"kmnc_k_section\"] = exp_cfg['parameters'].getint(\"kmnc_k_section\")\r\n coverage_parameters[\"tknc_k_value\"] = exp_cfg['parameters'].getint(\"tknc_k_value\")\r\n coverage_parameters[\"nc_threshold\"] = exp_cfg['parameters'].getfloat(\"nc_threshold\")\r\n coverage_parameters[\"idc_relevant_neurons\"] = exp_cfg['parameters'].getint(\"idc_relevant_neurons\")\r\n\r\n rq3_path = exp_cfg['parameters'].get(\"rq3_path\")\r\n sa_dir_name = exp_cfg['parameters'].get(\"sa_intermediate\")\r\n sa_intermedia_path = os.path.join(root_dir, sa_dir_name)\r\n idc_dir_name = exp_cfg['parameters'].get(\"idc_intermediate\")\r\n idc_intermedia_path = os.path.join(root_dir, idc_dir_name)\r\n coverage_parameters[\"idc_intermedia_path\"] = idc_intermedia_path\r\n console_args.exp_date = str(date.today()) if console_args.exp_date is None else console_args.exp_date\r\n dataset_network_dir = os.path.join(root_dir, rq3_path, console_args.exp_date, dataset_network)\r\n common_utils.create_path(sa_intermedia_path, idc_intermedia_path, rq3_path, dataset_network_dir)\r\n\r\n dataset_name, network_name = tuple(dataset_network.split(\"_\"))\r\n num_classes = dataloader.class_num(dataset_name)\r\n test_sizes = dataloader.test_sizes[dataset_name]\r\n\r\n s0 = datetime.now()\r\n # load model and boundary\r\n classifier = model_utils.load_model(network=network_name, dataset=dataset_name)\r\n boundary = common_utils.load_boundary(dataset_name, network_name)\r\n # direct use `size_per_class` correctly classified images\r\n x_test, y_test = dataloader.load_dataset(dataset_name)\r\n x_test = dataloader.preprocess_dataset(dataset_name, network_name, x_test)\r\n print(f\"INFO: {dataset_name, network_name} value range of clean images :[{np.min(x_test)},{np.max(x_test)}]\")\r\n\r\n # the adversarial inputs are already 
preprocessed.\r\n adv_x, adv_y = dataloader.load_adversarial_images(dataset_name, network_name, console_args.attack, mode=\"full\")\r\n print(f\"INFO: {dataset_name, network_name} value range of adv images :[{np.min(adv_x)},{np.max(adv_x)}]\")\r\n\r\n # I skip loading train set here. We don't need train-set because we have generated SA and IDC intermediate files\r\n skip_train = True\r\n if skip_train:\r\n x_train = y_train = None\r\n else:\r\n # note that the y_train is not in one-vector format. It's just an array of class ids.\r\n x_train, y_train = dataloader.load_train_set(console_args.dataset)\r\n x_train = dataloader.preprocess_dataset(console_args.dataset, console_args.network, x_train)\r\n print(f\"INFO: {console_args.dataset, console_args.network} \"\r\n f\"value range of train images :[{np.min(x_train)},{np.max(x_train)}]\")\r\n print(f\"Data & Model preparing time:{datetime.now() - s0}\")\r\n\r\n sampling_indices = common_utils.sampling_indices_dict(500, dataset_model=dataset_network,\r\n test_size=console_args.sample_capacity)\r\n correct_indices = sampling_indices['pure_correct_indices']\r\n pure_correct_labels = y_test[correct_indices].copy()\r\n\r\n # we divide the classes into ten splits\r\n section_num = 10\r\n class_ids = np.arange(num_classes)\r\n section_length = int(num_classes / section_num)\r\n\r\n adv_lsa, adv_dsa, adv_mdsa = common_utils.cached_sa(dataset_network=dataset_network,\r\n attack_type=console_args.attack,\r\n test_size=test_sizes)\r\n clean_lsa, clean_dsa, clean_mdsa = common_utils.cached_sa(dataset_network=dataset_network,\r\n attack_type=\"normal\",\r\n test_size=test_sizes)\r\n sa_dict = dict()\r\n sa_dict[\"clean_lsa\"], sa_dict[\"adv_lsa\"] = clean_lsa, adv_lsa\r\n sa_dict[\"clean_dsa\"], sa_dict[\"adv_dsa\"] = clean_dsa, adv_dsa\r\n sa_dict[\"clean_mdsa\"], sa_dict[\"adv_mdsa\"] = clean_mdsa, adv_mdsa\r\n sa_dict[\"lsa_boundary\"] = SurpriseCoverage.filter_outliers(\"LSA\",np.concatenate([clean_lsa,adv_lsa]).copy())\r\n sa_dict[\"dsa_boundary\"] = SurpriseCoverage.filter_outliers(\"DSA\",np.concatenate([clean_dsa,adv_dsa]).copy())\r\n sa_dict[\"mdsa_boundary\"] = SurpriseCoverage.filter_outliers(\"MDSA\",np.concatenate([clean_mdsa,adv_mdsa]).copy())\r\n\r\n start_class_id = int(section_length * console_args.split_id)\r\n top_idx = class_ids[start_class_id:start_class_id + section_length]\r\n print(f\"Selecting spilt:{console_args.split_id},classes:{top_idx}\")\r\n df_titles = [\"Sampling_Name\", \"correct_proportion\", \"NC\", \"NBC\", \"SNAC\", \"TKNC\", 'KMNC', \"LSC\", \"DSC\", \"MDSC\",\r\n \"IDC\", \"error_rate\"]\r\n df_path = os.path.join(dataset_network_dir,\r\n f\"{console_args.dataset_network}_{console_args.attack}_size{console_args.sample_capacity}\"\r\n f\"_class_ratio-split{console_args.split_id}.xlsx\")\r\n\r\n df = DataFrame(columns=df_titles)\r\n row_id = 0\r\n\r\n _aggregated_correct_idx = get_aggregated_indices(pure_correct_labels, top_idx)\r\n aggregated_correct_idx = correct_indices[_aggregated_correct_idx]\r\n aggregated_wrong_idx = get_aggregated_indices(adv_y, top_idx)\r\n\r\n s0 = datetime.now()\r\n for rid in range(console_args.repeat_times):\r\n if len(aggregated_correct_idx) >= console_args.sample_capacity:\r\n adv_minimum = 0\r\n else:\r\n adv_minimum = console_args.sample_capacity - len(aggregated_correct_idx)\r\n adv_maximum = int(console_args.sample_capacity * 0.7)\r\n assert adv_maximum > adv_minimum, f\"Maximum {adv_maximum} <= Minimum {adv_minimum}. 
\" \\\r\n f\"Only {len(aggregated_correct_idx)} correct inputs are found.\"\r\n wrong_num = np.random.randint(low=adv_minimum, high=adv_maximum + 1)\r\n correct_num = console_args.sample_capacity - wrong_num\r\n print(f\"Repeat times: {rid} of {console_args.repeat_times}, correct: {correct_num}, wrong: {wrong_num}\")\r\n select_correct_idx = np.random.choice(a=aggregated_correct_idx, size=correct_num, replace=False)\r\n select_wrong_idx = np.random.choice(a=aggregated_wrong_idx, size=wrong_num, replace=False)\r\n select_correct_inputs, select_correct_labels = \\\r\n x_test[select_correct_idx].copy(), y_test[select_correct_idx].copy()\r\n select_wrong_inputs, select_wrong_labels = \\\r\n adv_x[select_wrong_idx].copy(), adv_y[select_wrong_idx].copy()\r\n selected_x = np.concatenate([select_correct_inputs, select_wrong_inputs])\r\n selected_y = np.concatenate([select_correct_labels, select_wrong_labels])\r\n row = execute_sampling(dataset_network=dataset_network, classifier=classifier, x=selected_x, y=selected_y,\r\n train_inputs=x_train, train_labels=y_train, boundary=boundary, sa_dict=sa_dict,\r\n coverage_parameters=coverage_parameters, normal_indices=select_correct_idx,\r\n adv_indices=select_wrong_idx,classification=True)\r\n\r\n row_str = [round(rate, 2) for rate in row]\r\n sampling_row = [f\"sample{console_args.split_id}_repeat_{rid}\",\r\n round(correct_num / console_args.sample_capacity, 2)]\r\n sampling_row.extend(row_str)\r\n df.loc[row_id] = sampling_row\r\n row_id += 1\r\n df.to_excel(df_path)\r\n\r\n elapsed = (datetime.now() - s0)\r\n print(f\"RQ2 Time used for {dataset_network}-{console_args.attack} \", elapsed)\r\n", "from warnings import warn\r\n\r\nimport os\r\n\r\nfrom multiprocessing import Pool\r\n\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom keras.models import Model\r\nfrom scipy.stats import gaussian_kde\r\nfrom coverage.tools.surprise_adequacy.sa_utils import *\r\nfrom coverage.tools.common_utils import ScoreUtils\r\nfrom coverage.tools.deepspeech.deepspeech_utils import DSDataUtils\r\n\r\n\r\ndef _aggr_output(x):\r\n return [np.mean(x[..., j]) for j in range(x.shape[-1])]\r\n\r\n\r\ndef _get_saved_path(base_path, dataset, network, train_size: int, dtype, layer_names):\r\n \"\"\"Determine saved path of ats and pred\r\n Args:\r\n base_path (str): Base save path.\r\n dataset (str): Name of dataset.\r\n dtype (str): Name of dataset type (e.g., train, test, fgsm, ...).\r\n layer_names (list): List of layer names.\r\n Returns:\r\n ats_path: File path of ats.\r\n pred_path: File path of pred (independent of layers)\r\n \"\"\"\r\n\r\n joined_layer_names = \"_\".join(layer_names)\r\n return (\r\n os.path.join(\r\n base_path,\r\n dataset + \"_\" + network + \"_\" + dtype + \"_\" +\r\n str(train_size) + \"_\" + joined_layer_names + \"_ats\" + \".npy\",\r\n ),\r\n os.path.join(base_path, dataset + \"_\" + network + \"_\" +\r\n dtype + \"_\" + str(train_size) + \"_pred\" + \".npy\"),\r\n )\r\n\r\n\r\ndef get_ats(\r\n model,\r\n dataset,\r\n name,\r\n layer_names,\r\n save_path=None,\r\n batch_size=128,\r\n is_classification=True,\r\n num_classes=10,\r\n num_proc=10,\r\n dataset_name=None,\r\n):\r\n \"\"\"Extract activation traces of dataset from model.\r\n Args:\r\n model (keras model): Subject model.\r\n dataset (list): Set of inputs fed into the model.\r\n name (str): Name of input set.\r\n layer_names (list): List of selected layer names.\r\n save_path (tuple): Paths of being saved ats and pred.\r\n batch_size (int): Size of batch when serving.\r\n 
is_classification (bool): Task type, True if classification task or False.\r\n num_classes (int): The number of classes (labels) in the dataset.\r\n num_proc (int): The number of processes for multiprocessing.\r\n Returns:\r\n ats (list): List of (layers, inputs, neuron outputs).\r\n pred (list): List of predicted classes.\r\n \"\"\"\r\n\r\n temp_model = Model(\r\n inputs=model.input,\r\n outputs=[model.get_layer(\r\n layer_name).output for layer_name in layer_names],\r\n )\r\n\r\n prefix = info(\"[\" + name + \"] \")\r\n if is_classification:\r\n p = Pool(num_proc)\r\n print(prefix + \"Model serving\")\r\n # pred = model.predict_classes(dataset, batch_size=batch_size, verbose=1)\r\n predict = model.predict(dataset, batch_size=batch_size, verbose=1)\r\n if dataset_name == \"speech-commands\":\r\n pred_words = ScoreUtils.speech_commands_prediction(predict)\r\n pred = [DSDataUtils.get_words_idx(s) for s in pred_words]\r\n else:\r\n pred = np.argmax(predict, axis=1)\r\n\r\n if len(layer_names) == 1:\r\n layer_outputs = [\r\n temp_model.predict(dataset, batch_size=batch_size, verbose=1)\r\n ]\r\n else:\r\n layer_outputs = temp_model.predict(\r\n dataset, batch_size=batch_size, verbose=1\r\n )\r\n\r\n print(prefix + \"Processing ATs\")\r\n ats = None\r\n for layer_name, layer_output in zip(layer_names, layer_outputs):\r\n print(\"Layer: \" + layer_name)\r\n # (primarily for convolutional layers - note that kim et al used ndim==3)\r\n # I think here should be 2.\r\n # The output shape may be like (batch_size,channel1,channel2),\r\n # and we should change it to (batch_size,channel2)\r\n if layer_output[0].ndim >= 2:\r\n # For convolutional layers\r\n layer_matrix = np.array(\r\n p.map(_aggr_output, [layer_output[i]\r\n for i in range(len(dataset))])\r\n )\r\n else:\r\n layer_matrix = np.array(layer_output)\r\n\r\n if ats is None:\r\n ats = layer_matrix\r\n else:\r\n ats = np.append(ats, layer_matrix, axis=1)\r\n layer_matrix = None\r\n else:\r\n p = Pool(num_proc)\r\n pred = []\r\n print(prefix + \"Model serving\")\r\n if len(layer_names) == 1:\r\n layer_outputs = [\r\n temp_model.predict(dataset, batch_size=batch_size, verbose=1)\r\n ]\r\n else:\r\n layer_outputs = temp_model.predict(\r\n dataset, batch_size=batch_size, verbose=1\r\n )\r\n\r\n print(prefix + \"Processing ATs\")\r\n ats = None\r\n for layer_name, layer_output in zip(layer_names, layer_outputs):\r\n print(\"Layer: \" + layer_name)\r\n if layer_output[0].ndim == 3:\r\n # For convolutional layers\r\n layer_matrix = np.array(\r\n p.map(_aggr_output, [layer_output[i]\r\n for i in range(len(dataset))])\r\n )\r\n else:\r\n layer_matrix = np.array(layer_output)\r\n\r\n if ats is None:\r\n ats = layer_matrix\r\n else:\r\n ats = np.append(ats, layer_matrix, axis=1)\r\n layer_matrix = None\r\n\r\n # if save_path is not None:\r\n # np.save(save_path[0], ats)\r\n # np.save(save_path[1], pred)\r\n\r\n return ats, pred\r\n\r\n\r\ndef find_closest_at(at, train_ats):\r\n \"\"\"The closest distance between subject AT and training ATs.\r\n Args:\r\n at (list): List of activation traces of an input.\r\n train_ats (list): List of activation traces in training set (filtered)\r\n\r\n Returns:\r\n dist (int): The closest distance.\r\n at (list): Training activation trace that has the closest distance.\r\n \"\"\"\r\n\r\n dist = np.linalg.norm(at - train_ats, axis=1)\r\n return (min(dist), train_ats[np.argmin(dist)])\r\n\r\n\r\ndef _get_train_target_ats(model, x_train, x_target, target_name, layer_names, args):\r\n \"\"\"Extract ats of train and target 
inputs. If there are saved files, then skip it.\r\n Args:\r\n model (keras model): Subject model.\r\n x_train (list): Set of training inputs.\r\n x_target (list): Set of target (test or adversarial) inputs.\r\n target_name (str): Name of target set.\r\n layer_names (list): List of selected layer names.\r\n args: keyboard console_args.\r\n Returns:\r\n train_ats (list): ats of train set.\r\n train_pred (list): pred of train set.\r\n target_ats (list): ats of target set.\r\n target_pred (list): pred of target set.\r\n \"\"\"\r\n train_size = len(x_train)\r\n saved_train_path = _get_saved_path(\r\n args.save_path, args.dataset, args.network, train_size, \"train\", layer_names)\r\n if os.path.exists(saved_train_path[0]):\r\n print(infog(\"Found saved {} ATs, skip serving\".format(\"train\")))\r\n # In case train_ats is stored in a disk\r\n train_ats = np.load(saved_train_path[0])\r\n train_pred = np.load(saved_train_path[1])\r\n else:\r\n train_ats, train_pred = get_ats(\r\n model,\r\n x_train,\r\n \"train\",\r\n layer_names,\r\n num_classes=args.num_classes,\r\n is_classification=args.is_classification,\r\n save_path=saved_train_path,\r\n dataset_name=args.dataset,\r\n )\r\n print(infog(\"train ATs is saved at \" + saved_train_path[0]))\r\n if saved_train_path is not None:\r\n np.save(saved_train_path[0], train_ats)\r\n np.save(saved_train_path[1], train_pred)\r\n\r\n saved_target_path = _get_saved_path(\r\n args.save_path, args.dataset, args.network, train_size, target_name, layer_names\r\n )\r\n\r\n if True:\r\n target_ats, target_pred = get_ats(\r\n model,\r\n x_target,\r\n target_name,\r\n layer_names,\r\n num_classes=args.num_classes,\r\n is_classification=args.is_classification,\r\n save_path=saved_target_path,\r\n dataset_name=args.dataset,\r\n )\r\n print(infog(target_name + \" ATs is saved at \" + saved_target_path[0]))\r\n return train_ats, train_pred, target_ats, target_pred\r\n\r\n\r\ndef generate_at(model, x_train, args, layer_names):\r\n train_size = len(x_train)\r\n saved_train_path = _get_saved_path(\r\n args.save_path, args.dataset, args.network, train_size, \"train\", layer_names)\r\n if os.path.exists(saved_train_path[0]):\r\n print(infog(\"Found saved {} ATs, skip serving\".format(\"train\")))\r\n print(\"Skip training ats generation\")\r\n else:\r\n train_ats, train_pred = get_ats(\r\n model,\r\n x_train,\r\n \"train\",\r\n layer_names,\r\n num_classes=args.num_classes,\r\n is_classification=args.is_classification,\r\n save_path=saved_train_path,\r\n )\r\n print(infog(\"train ATs is saved at \" + saved_train_path[0]))\r\n if saved_train_path is not None:\r\n np.save(saved_train_path[0], train_ats)\r\n np.save(saved_train_path[1], train_pred)\r\n\r\n\r\ndef fetch_dsa(model, x_train, x_target, target_name, layer_names, args):\r\n # \"\"\"Distance-based SA\r\n # Args:\r\n # model (keras model): Subject model.\r\n # x_train (list): Set of training inputs.\r\n # x_target (list): Set of target (test or adversarial) inputs.\r\n # target_name (str): Name of target set.\r\n # sa_layer_names (list): List of selected layer names.\r\n # console_args: keyboard console_args.\r\n # Returns:\r\n # dsa (list): List of dsa for each target input.\r\n # \"\"\"\r\n\r\n assert args.is_classification\r\n\r\n prefix = info(\"[\" + target_name + \"] \")\r\n train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(\r\n model, x_train, x_target, target_name, layer_names, args\r\n )\r\n\r\n class_matrix = {}\r\n all_idx = []\r\n for i, label in enumerate(train_pred):\r\n if label not 
in class_matrix:\r\n class_matrix[label] = []\r\n class_matrix[label].append(i)\r\n all_idx.append(i)\r\n\r\n dsa = []\r\n\r\n print(prefix + \"Fetching DSA\")\r\n for i, at in enumerate(tqdm(target_ats)):\r\n label = target_pred[i]\r\n a_dist, a_dot = find_closest_at(at, train_ats[class_matrix[label]])\r\n b_dist, _ = find_closest_at(\r\n a_dot, train_ats[list(set(all_idx) - set(class_matrix[label]))]\r\n )\r\n dsa.append(a_dist / b_dist)\r\n\r\n return dsa\r\n\r\n\r\ndef fetch_mdsa(model, x_train, x_target, target_name, layer_names, args):\r\n \"\"\"\r\n @param model: Subject model.\r\n @param x_train: Set of training inputs.\r\n @param x_target: Set of target (test or adversarial) inputs.\r\n @param target_name: name of targeted test inputs\r\n @param layer_names: List of selected layer names.\r\n @param args: keyboard console_args.\r\n @return: List of mdsa for each target input.\r\n \"\"\"\r\n\r\n assert args.is_classification\r\n\r\n prefix = info(\"[\" + target_name + \"] \")\r\n train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(\r\n model, x_train, x_target, target_name, layer_names, args\r\n )\r\n\r\n class_matrix = {}\r\n all_idx = []\r\n for i, label in enumerate(train_pred):\r\n if label not in class_matrix:\r\n class_matrix[label] = []\r\n class_matrix[label].append(i)\r\n all_idx.append(i)\r\n mdsa = []\r\n\r\n print(prefix + \"Fetching MDSA\")\r\n train_size = len(x_train)\r\n mdsa_inter_path = os.path.join(\r\n args.save_path, f\"{args.dataset}_{args.network}_{train_size}_mdsa_inter.npz\")\r\n if os.path.exists(mdsa_inter_path):\r\n inter_dict = np.load(mdsa_inter_path, allow_pickle=True)\r\n to_keep_dict, mu_dict, Sinv_dict = inter_dict[\"to_keep\"][(\r\n )], inter_dict[\"mu\"][()], inter_dict[\"Sinv\"][()]\r\n else:\r\n # generate to_keep\r\n # here, train_ats should be like (test_size, cols_nums)\r\n to_keep_dict = dict()\r\n mu_dict = dict()\r\n Sinv_dict = dict()\r\n for label in range(args.num_classes):\r\n _to_keep = np.ones(train_ats.shape[1], dtype=np.bool_)\r\n # print(\"INFO\",train_ats[class_matrix[label]].shape)\r\n col_vectors = np.transpose(train_ats[class_matrix[label]])\r\n # print(\"INFO\",col_vectors.shape)\r\n for i in range(col_vectors.shape[0]):\r\n # print(np.var(col_vectors[i]))\r\n if np.var(col_vectors[i]) < args.var_threshold:\r\n _to_keep[i] = False\r\n refined_ats = col_vectors[_to_keep, :]\r\n to_keep_dict[label] = _to_keep\r\n _mu = np.mean(refined_ats, axis=1).transpose()\r\n mu_dict[label] = _mu.copy()\r\n _Sinv = np.linalg.inv(np.cov(refined_ats))\r\n Sinv_dict[label] = _Sinv.copy()\r\n np.savez(mdsa_inter_path, to_keep=to_keep_dict,\r\n mu=mu_dict, Sinv=Sinv_dict)\r\n\r\n for i, at in enumerate(tqdm(target_ats)):\r\n to_keep = to_keep_dict[target_pred[i]]\r\n col_vector = at.transpose()\r\n refined_col_vector = col_vector[to_keep].transpose()\r\n label = target_pred[i]\r\n mu, Sinv = mu_dict[label], Sinv_dict[label]\r\n tmp = np.dot((refined_col_vector - mu).transpose(), Sinv)\r\n mdsa.append(np.sqrt(np.dot(tmp, (refined_col_vector - mu))).item())\r\n\r\n return mdsa\r\n\r\n\r\ndef _get_kdes(train_ats, train_pred, class_matrix, args):\r\n \"\"\"Kernel density estimation\r\n Args:\r\n train_ats (list): List of activation traces in training set.\r\n train_pred (list): List of prediction of train set.\r\n class_matrix (list): List of index of classes.\r\n args: Keyboard console_args.\r\n Returns:\r\n kdes (list): List of kdes per label if classification task.\r\n removed_cols (list): List of removed columns by variance 
threshold.\r\n \"\"\"\r\n\r\n removed_cols = []\r\n if args.is_classification:\r\n for label in range(args.num_classes):\r\n col_vectors = np.transpose(train_ats[class_matrix[label]])\r\n for i in range(col_vectors.shape[0]):\r\n if (\r\n np.var(col_vectors[i]) < args.var_threshold\r\n and i not in removed_cols\r\n ):\r\n removed_cols.append(i)\r\n print(sorted(removed_cols))\r\n kdes = {}\r\n for label in tqdm(range(args.num_classes), desc=\"kde\"):\r\n refined_ats = np.transpose(train_ats[class_matrix[label]])\r\n refined_ats = np.delete(refined_ats, removed_cols, axis=0)\r\n print(refined_ats.shape)\r\n print(label)\r\n if refined_ats.shape[0] == 0:\r\n print(\r\n warn(\"ats were removed by threshold {}\".format(\r\n args.var_threshold))\r\n )\r\n break\r\n kdes[label] = gaussian_kde(refined_ats)\r\n\r\n else:\r\n if np.isnan(train_ats).any():\r\n print(\"Found nan in train ats\")\r\n col_vectors = np.transpose(train_ats)\r\n for i in range(col_vectors.shape[0]):\r\n if np.var(col_vectors[i]) < args.var_threshold:\r\n removed_cols.append(i)\r\n print(len(removed_cols))\r\n refined_ats = np.transpose(train_ats)\r\n refined_ats = np.delete(refined_ats, removed_cols, axis=0)\r\n if refined_ats.shape[0] == 0:\r\n print(warn(\"ats were removed by threshold {}\".format(args.var_threshold)))\r\n kdes = [gaussian_kde(refined_ats)]\r\n print(gaussian_kde(refined_ats))\r\n # print(type(kdes[0]))\r\n # if np.isnan(kdes[0]).any():\r\n # raise Exception(\"Found NaN in kde\")\r\n\r\n print(infog(\"The number of removed columns: {}\".format(len(removed_cols))))\r\n\r\n return kdes, removed_cols\r\n\r\n\r\ndef _get_lsa(kde, at, removed_cols):\r\n refined_at = np.delete(at, removed_cols, axis=0)\r\n # print(refined_at)\r\n # print(np.transpose(refined_at))\r\n transpose_refined_at = np.transpose(refined_at)\r\n _logpdf = -kde.logpdf(transpose_refined_at)\r\n res = np.asscalar(_logpdf)\r\n if np.isnan(res).any() or np.isinf(res).any():\r\n raise Exception()\r\n return np.asscalar(-kde.logpdf(np.transpose(refined_at)))\r\n\r\n\r\ndef fetch_lsa(model, x_train, x_target, target_name, layer_names, args):\r\n def check_nan(x):\r\n import math\r\n if isinstance(x, np.ndarray):\r\n if np.isnan(x).any() or np.isinf(x).any():\r\n raise Exception(\"nan\")\r\n if isinstance(x, list):\r\n for xi in x:\r\n if math.isnan(xi) or math.isinf(xi):\r\n raise Exception(\"nan\")\r\n print(\"No nan found\")\r\n\r\n # \"\"\"Likelihood-based SA\r\n # Args:\r\n # model (keras model): Subject model.\r\n # x_train (list): Set of training inputs.\r\n # x_target (list): Set of target (test or[] adversarial) inputs.\r\n # target_name (str): Name of target set.\r\n # sa_layer_names (list): List of selected layer names.\r\n # console_args: Keyboard console_args.\r\n # Returns:\r\n # lsa (list): List of lsa for each target input.\r\n # \"\"\"\r\n\r\n prefix = info(\"[\" + target_name + \"] \")\r\n train_ats, train_pred, target_ats, target_pred = _get_train_target_ats(\r\n model, x_train, x_target, target_name, layer_names, args\r\n )\r\n\r\n check_nan(train_ats)\r\n check_nan(train_pred)\r\n check_nan(target_ats)\r\n check_nan(target_pred)\r\n\r\n class_matrix = {}\r\n if args.is_classification:\r\n for i, label in enumerate(train_pred):\r\n if label not in class_matrix.keys():\r\n class_matrix[label] = []\r\n class_matrix[label].append(i)\r\n\r\n kdes, removed_cols = _get_kdes(train_ats, train_pred, class_matrix, args)\r\n\r\n lsa = []\r\n print(prefix + \"Fetching LSA\")\r\n if args.is_classification:\r\n for i, at in 
enumerate(tqdm(target_ats)):\r\n label = target_pred[i]\r\n kde = kdes[label]\r\n lsa.append(_get_lsa(kde, at, removed_cols))\r\n else:\r\n kde = kdes[0]\r\n for at in tqdm(target_ats):\r\n lsa.append(_get_lsa(kde, at, removed_cols))\r\n\r\n return lsa\r\n\r\n\r\ndef get_sc(lower, upper, k, sa):\r\n \"\"\"Surprise Coverage\r\n Args:\r\n lower (int): Lower bound.\r\n upper (int): Upper bound.\r\n k (int): The number of buckets.\r\n sa (list): List of lsa or dsa.\r\n Returns:\r\n cov (int): Surprise coverage.\r\n \"\"\"\r\n\r\n buckets = np.digitize(sa, np.linspace(lower, upper, k))\r\n return len(list(set(buckets))) / float(k) * 100\r\n\r\n\r\n# sa_selected_layers = {\r\n# 'alexnet' : [\"\"],\r\n# 'lenet5': ['dense_3'],\r\n# 'vgg16': ['dense_1'],\r\n# 'resnet20': ['activation_19'],\r\n# 'resnet32': ['activation_28'],\r\n# 'vgg19': ['block5_conv4'],\r\n# 'resnet50': ['activation_49'],\r\n# 'deepspeech': ['dense_1'],\r\n# 'dave-orig': ['fc4'],\r\n# }\r\n\r\nsa_selected_layers = {\r\n 'cifar10_alexnet': [\"dense_2\"], # -3\r\n # 'cifar10_alexnet': [\"dense_1\"], # -3\r\n \"fashion-mnist_lenet5\": [\"dense_3\"], # -2\r\n 'mnist_lenet5': ['dense_3'], # -2\r\n 'cifar10_vgg16': ['dense_1'], # -3\r\n 'cifar10_resnet20': ['flatten_1'], # -1\r\n 'cifar100_resnet32': ['flatten_1'], # -1\r\n 'imagenet_vgg19': ['block5_conv4'], # -6\r\n 'imagenet_resnet50': ['activation_49'], # -3\r\n 'speech-commands_deepspeech': ['dense_1'],\r\n 'driving_dave-orig': ['fc4'],\r\n 'driving_dave-dropout': ['fc3'],\r\n}\r\n" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.random.choice", "pandas.DataFrame", "numpy.min", "numpy.nonzero", "numpy.random.randint", "numpy.arange" ], [ "numpy.dot", "numpy.argmin", "numpy.load", "numpy.mean", "numpy.linalg.norm", "numpy.save", "numpy.transpose", "numpy.asscalar", "numpy.argmax", "numpy.append", "numpy.delete", "numpy.array", "numpy.isinf", "numpy.isnan", "numpy.cov", "numpy.ones", "scipy.stats.gaussian_kde", "numpy.savez", "numpy.linspace", "numpy.var" ] ]
BolunDai0216/ConsensusControl
[ "12f36fa3a70897b9e6cbcdab19734ca8360211a5" ]
[ "series3/Exercise2.py" ]
[ "import numpy as np\nimport math\nfrom numpy.linalg import matrix_rank\n\n\ndef main():\n R = np.array([[-2, 0, 2, 0, 0, 0, 0, 0],\n [0, 0, 0, 2, 0, -2, 0, 0],\n [-2, 2, 0, 0, 2, -2, 0, 0],\n [(math.sqrt(14)-2)/2, (math.sqrt(14)+2)/2, 0, 0,\n 0, 0, (2-math.sqrt(14))/2, -(math.sqrt(14)+2)/2],\n [0, 0, 0, 0, (2+math.sqrt(14))/2, (math.sqrt(14)-2)/2, -(2+math.sqrt(14))/2, (2-math.sqrt(14))/2]])\n print(\"The rank for the rigidity matrix is {}\".format(matrix_rank(R)))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.linalg.matrix_rank" ] ]
slps20425/reinforment-learn
[ "fcae362d1fe8458c2b8f00a624aae93c48318141" ]
[ "finlab-20210319T093946Z-001/finlab/crawler.py" ]
[ "import datetime\nimport requests\nimport pandas as pd\nimport pickle\nimport time\nimport urllib\nimport os\nfrom io import StringIO\nimport numpy as np\nimport warnings\nimport os\nimport datetime\nimport time\nfrom tqdm import tnrange, tqdm_notebook\nfrom requests.exceptions import ConnectionError\nfrom requests.exceptions import ReadTimeout\nimport ipywidgets as widgets\n\nimport pip\n\ndef import_or_install(package):\n try:\n __import__(package)\n except ImportError:\n print('Please install lxml(pip install lxml)')\n\nimport_or_install(\"lxml\")\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\ndate_range_record_file = os.path.join('history', 'date_range.pickle')\n\n\ndef requests_get(*args1, **args2):\n i = 3\n while i >= 0:\n try:\n return requests.get(*args1, **args2)\n except (ConnectionError, ReadTimeout) as error:\n print(error)\n print('retry one more time after 60s', i, 'times left')\n time.sleep(60)\n i -= 1\n return pd.DataFrame()\n\n### ----------\n### Helper\n### ----------\n\ndef otc_date_str(date):\n \"\"\"將datetime.date轉換成民國曆\n\n Args:\n date (datetime.date): 西元歷的日期\n\n Returns:\n str: 民國歷日期 ex: 109/01/01\n \"\"\"\n return str(date.year - 1911) + date.strftime('%Y/%m/%d')[4:]\n\n\ndef combine_index(df, n1, n2):\n\n \"\"\"將dataframe df中的股票代號與股票名稱合併\n\n Keyword arguments:\n\n Args:\n df (pandas.DataFrame): 此dataframe含有column n1, n2\n n1 (str): 股票代號\n n2 (str): 股票名稱\n\n Returns:\n df (pandas.DataFrame): 此dataframe的index為「股票代號+股票名稱」\n \"\"\"\n\n return df.set_index(df[n1].astype(str).str.replace(' ', '') + \\\n ' ' + df[n2].astype(str).str.replace(' ', '')).drop([n1, n2], axis=1)\n\ndef crawl_benchmark(date):\n\n date_str = date.strftime('%Y%m%d')\n res = requests_get(\"https://www.twse.com.tw/exchangeReport/MI_5MINS_INDEX?response=csv&date=\" +\n date_str + \"&_=1544020420045\")\n\n # 利用 pandas 將資料整理成表格\n\n if len(res.text) < 10:\n return pd.DataFrame()\n\n df = pd.read_csv(StringIO(res.text.replace(\"=\",\"\")), header=1, index_col='時間')\n\n # 資料處理\n\n df = df.dropna(how='all', axis=0).dropna(how='all', axis=1)\n df.index = pd.to_datetime(date.strftime('%Y %m %d ') + pd.Series(df.index))\n df = df.apply(lambda s: s.astype(str).str.replace(\",\", \"\").astype(float))\n df = df.reset_index().rename(columns={'時間':'date'})\n df['stock_id'] = '台股指數'\n return df.set_index(['stock_id', 'date'])\n\ndef crawl_capital():\n res = requests_get('https://dts.twse.com.tw/opendata/t187ap03_L.csv', headers=headers)\n res.encoding = 'utf-8'\n df = pd.read_csv(StringIO(res.text))\n time.sleep(5)\n res = requests_get('https://dts.twse.com.tw/opendata/t187ap03_O.csv', headers=headers)\n res.encoding = 'utf-8'\n df = df.append(pd.read_csv(StringIO(res.text)))\n\n df['date'] = pd.to_datetime(str(datetime.datetime.now().year) + df['出表日期'].str[3:])\n df.set_index([df['公司代號'].astype(str) + ' ' + df['公司簡稱'].astype(str), 'date'], inplace=True)\n df.index.levels[0].name = '股票名稱'\n return df\n\n\ndef interest():\n res = requests_get('https://www.twse.com.tw/exchangeReport/TWT48U_ALL?response=open_data', headers=headers)\n res.encoding = 'utf-8'\n df = pd.read_csv(StringIO(res.text))\n\n time.sleep(5)\n\n res = requests_get('https://www.tpex.org.tw/web/stock/exright/preAnnounce/prepost_result.php?l=zh-tw&o=data', headers=headers)\n res.encoding = 'utf-8'\n df = df.append(pd.read_csv(StringIO(res.text)))\n\n df['date'] = df['除權息日期'].str.replace('年', '/').str.replace('月', '/').str.replace('日', '')\n 
df['date'] = pd.to_datetime(str(datetime.datetime.now().year) + df['date'].str[3:])\n df = df.set_index([df['股票代號'].astype(str) + ' ' + df['名稱'].astype(str), 'date'])\n return df\n\n\ndef preprocess(df, date):\n df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')\n df.columns = df.columns.str.replace(' ', '')\n df.index.name = 'stock_id'\n df.columns.name = ''\n df['date'] = pd.to_datetime(date)\n df = df.reset_index().set_index(['stock_id', 'date'])\n df = df.apply(lambda s: s.astype(str).str.replace(',',''))\n\n return df\n\n\n\ndef bargin_twe(date):\n datestr = date.strftime('%Y%m%d')\n \n res = requests_get('https://www.twse.com.tw/fund/T86?response=csv&date='\\\n +datestr+'&selectType=ALLBUT0999')\n try:\n df = pd.read_csv(StringIO(res.text.replace('=','')), header=1)\n except:\n print('holiday')\n return pd.DataFrame()\n \n df = combine_index(df, '證券代號', '證券名稱')\n df = preprocess(df, date)\n return df\n\ndef bargin_otc(date):\n datestr = otc_date_str(date)\n \n url = 'https://www.tpex.org.tw/web/stock/3insti/daily_trade/3itrade_hedge_result.php?l=zh-tw&o=csv&se=EW&t=D&d='+datestr+'&s=0,asc'\n res = requests_get(url, headers=headers)\n try:\n df = pd.read_csv(StringIO(res.text), header=1)\n except:\n print('holiday')\n return pd.DataFrame()\n\n df = combine_index(df, '代號', '名稱')\n df = preprocess(df, date)\n return df\n\ndef price_twe(date):\n date_str = date.strftime('%Y%m%d')\n res = requests_get('https://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date='+date_str+'&type=ALLBUT0999', headers=headers, )\n\n if res.text == '':\n print('holiday')\n return pd.DataFrame()\n\n header = np.where(list(map(lambda l: '證券代號' in l, res.text.split('\\n')[:200])))[0][0]\n\n df = pd.read_csv(StringIO(res.text.replace('=','')), header=header-1)\n df = combine_index(df, '證券代號', '證券名稱')\n df = preprocess(df, date)\n return df\n\ndef price_otc(date):\n datestr = otc_date_str(date)\n link = 'https://www.tpex.org.tw/web/stock/aftertrading/daily_close_quotes/stk_quote_download.php?l=zh-tw&d='+datestr+'&s=0,asc,0'\n res = requests_get(link, headers=headers)\n df = pd.read_csv(StringIO(res.text), header=2)\n\n if len(df) < 30:\n print('holiday')\n return pd.DataFrame()\n\n df = combine_index(df, '代號', '名稱')\n df = preprocess(df, date)\n df = df[df['成交筆數'].str.replace(' ', '') != '成交筆數']\n return df\n\ndef pe_twe(date):\n datestr = date.strftime('%Y%m%d')\n res = requests_get('https://www.twse.com.tw/exchangeReport/BWIBBU_d?response=csv&date='+datestr+'&selectType=ALL', headers=headers)\n try:\n df = pd.read_csv(StringIO(res.text), header=1)\n except:\n print('holiday')\n return pd.DataFrame()\n\n df = combine_index(df, '證券代號', '證券名稱')\n df = preprocess(df, date)\n return df\n\ndef pe_otc(date):\n datestr = otc_date_str(date)\n res = requests_get('https://www.tpex.org.tw/web/stock/aftertrading/peratio_analysis/pera_result.php?l=zh-tw&o=csv&charset=UTF-8&d='+datestr+'&c=&s=0,asc', headers=headers)\n try:\n df = pd.read_csv(StringIO(res.text), header=3)\n df = combine_index(df, '股票代號', '名稱')\n df = preprocess(df, date)\n except:\n print('holiday')\n return pd.DataFrame()\n\n return df\n\ndef month_revenue(name, date):\n\n year = date.year - 1911\n month = (date.month+10)%12+1\n if month == 12:\n year -= 1\n url = 'https://mops.twse.com.tw/nas/t21/%s/t21sc03_%d_%d.html' % (name, year, month)\n print(url)\n res = requests_get(url, headers=headers)\n res.encoding = 'big5'\n\n try:\n dfs = pd.read_html(StringIO(res.text), encoding='big-5')\n except:\n print('MONTH ' + name + ': cannot parse ' + 
str(date))\n return pd.DataFrame()\n\n df = pd.concat([df for df in dfs if df.shape[1] <= 11 and df.shape[1] > 5])\n\n if 'levels' in dir(df.columns):\n df.columns = df.columns.get_level_values(1)\n else:\n df = df[list(range(0,10))]\n column_index = df.index[(df[0] == '公司代號')][0]\n df.columns = df.iloc[column_index]\n\n df = df.loc[:,~df.columns.isnull()]\n df = df.loc[~pd.to_numeric(df['當月營收'], errors='coerce').isnull()]\n df = df[df['公司代號'] != '合計']\n df = combine_index(df, '公司代號', '公司名稱')\n df = preprocess(df, datetime.date(date.year, date.month, 10))\n return df.drop_duplicates()\n\ndef crawl_split_twe():\n\n res = requests_get('https://www.twse.com.tw/exchangeReport/TWTAVU?response=csv&_=1537824706232', headers=headers)\n\n df = pd.read_csv(StringIO(res.text),header=1)\n df = df.dropna(how='all', axis=1).dropna(thresh=3, axis=0)\n\n def process_date(s):\n return pd.to_datetime(str(datetime.datetime.now().year) + s.str[3:])\n\n df['停止買賣日期'] = process_date(df['停止買賣日期'])\n df['恢復買賣日期'] = process_date(df['恢復買賣日期'])\n df['股票代號'] = df['股票代號'].astype(int).astype(str)\n df['stock_id'] = df['股票代號'] + ' ' + df['名稱']\n df['date'] = df['恢復買賣日期']\n df = df.set_index(['stock_id', 'date'])\n\n return df\n\n\ndef crawl_split_otc():\n res = requests_get(\"https://www.tpex.org.tw/web/stock/exright/decap/decap_download.php?l=zh-tw&d=107/09/21&s=0,asc,0\", headers=headers)\n df = pd.read_csv(StringIO(res.text), header=1)\n df = df.dropna(thresh=5, axis=0)\n df['stock_id'] = df['代號'] + ' ' + df['名稱']\n def process_date(s):\n ss = s.astype(int).astype(str)\n return pd.to_datetime(str(datetime.datetime.now().year) + '/' + ss.str[3:5] + '/' + ss.str[5:])\n\n df['停止買賣日期'] = process_date(df['停止買賣日期'])\n df['恢復買賣日期'] = process_date(df['恢復買賣日期'])\n df['date'] = df['恢復買賣日期']\n df = df.rename(columns={'代號':'股票代號'})\n df = df.set_index(['stock_id', 'date'])\n return df\n\nimport io\nimport json\nimport requests\nimport datetime\nimport pandas as pd\n\ndef crawl_twse_divide_ratio():\n\n datestr = datetime.datetime.now().strftime('%Y%m%d')\n res = requests_get(\"https://www.twse.com.tw/exchangeReport/TWT49U?response=csv&strDate=20040101&endDate=\"+datestr+\"&_=1551532565786\")\n\n df = pd.read_csv(io.StringIO(res.text.replace(\"=\", \"\")), header=1)\n\n df = df.dropna(thresh=5).dropna(how='all', axis=1)\n\n df = df[~df['資料日期'].isnull()]\n\n # set stock id\n df['stock_id'] = df['股票代號'] + ' ' + df['股票名稱']\n\n # set dates\n df = df[~df['資料日期'].isnull()]\n years = df['資料日期'].str.split('年').str[0].astype(int) + 1911\n years.loc[df['資料日期'].str[3] != '年'] = np.nan\n years.loc[years > datetime.datetime.now().year] = np.nan\n years.ffill(inplace=True)\n dates = years.astype(int).astype(str) +'/'+ df['資料日期'].str.split('年').str[1].str.replace('月', '/').str.replace('日', '')\n df['date'] = pd.to_datetime(dates, errors='coerce')\n\n # convert to float\n float_name_list = ['除權息前收盤價', '除權息參考價', '權值+息值', '漲停價格',\n '跌停價格', '開盤競價基準', '減除股利參考價' , '最近一次申報每股 (單位)淨值',\n '最近一次申報每股 (單位)盈餘']\n\n df[float_name_list] = df[float_name_list].astype(str).apply(lambda s:s.str.replace(',', '')).astype(float)\n\n\n df['twse_divide_ratio'] = df['除權息前收盤價'] / df['開盤競價基準']\n return df.set_index(['stock_id', 'date'])\n\ndef crawl_otc_divide_ratio():\n\n y = datetime.datetime.now().year\n m = datetime.datetime.now().month\n d = datetime.datetime.now().day\n\n y = str(y-1911)\n m = str(m) if m > 9 else '0' + str(m)\n d = str(d) if d > 9 else '0' + str(d)\n\n datestr = '%s/%s/%s' % (y,m,d)\n res_otc = 
requests_get('https://www.tpex.org.tw/web/stock/exright/dailyquo/exDailyQ_result.php?l=zh-tw&d=097/01/02&ed=' + datestr + '&_=1551594269115')\n\n df = pd.DataFrame(json.loads(res_otc.text)['aaData'])\n df.columns = ['除權息日期', '代號', '名稱', '除權息前收盤價', '除權息參考價',\n '權值', '息值',\"權+息值\",\"權/息\",\"漲停價格\",\"跌停價格\",\"開盤競價基準\",\n \"減除股利參考價\",\"現金股利\", \"每千股無償配股\", \"-\", \"現金增資股數\", \"現金增資認購價\",\n \"公開承銷股數\", \"員工認購股數\",\"原股東認購數\", \"按持股比例千股認購\"]\n\n\n float_name_list = [ '除權息前收盤價', '除權息參考價',\n '權值', '息值',\"權+息值\",\"漲停價格\",\"跌停價格\",\"開盤競價基準\",\n \"減除股利參考價\",\"現金股利\", \"每千股無償配股\", \"現金增資股數\", \"現金增資認購價\",\n \"公開承銷股數\", \"員工認購股數\",\"原股東認購數\", \"按持股比例千股認購\"\n ]\n df[float_name_list] = df[float_name_list].astype(str).apply(lambda s:s.str.replace(',', '')).astype(float)\n\n # set stock id\n df['stock_id'] = df['代號'] + ' ' + df['名稱']\n\n # set dates\n dates = df['除權息日期'].str.split('/')\n dates = (dates.str[0].astype(int) + 1911).astype(str) + '/' + dates.str[1] + '/' + dates.str[2]\n df['date'] = pd.to_datetime(dates)\n\n df['otc_divide_ratio'] = df['除權息前收盤價'] / df['開盤競價基準']\n return df.set_index(['stock_id', 'date'])\n\n\ndef crawl_twse_cap_reduction():\n\n datestr = datetime.datetime.now().strftime('%Y%m%d')\n res3 = requests_get(\"https://www.twse.com.tw/exchangeReport/TWTAUU?response=csv&strDate=20110101&endDate=\" + datestr + \"&_=1551597854043\")\n df = pd.read_csv(io.StringIO(res3.text), header=1)\n df = df.dropna(thresh=5).dropna(how='all',axis=1)\n dates = (df['恢復買賣日期'].str.split('/').str[0].astype(int) + 1911).astype(str) + df['恢復買賣日期'].str[3:]\n df['date'] = pd.to_datetime(dates, errors='coerce')\n df['stock_id'] = df['股票代號'].astype(int).astype(str) + ' ' + df['名稱']\n df.head()\n\n df['twse_cap_divide_ratio'] = df['停止買賣前收盤價格']/df['開盤競價基準']\n\n return df.set_index(['stock_id', 'date'])\n\ndef crawl_otc_cap_reduction():\n\n y = datetime.datetime.now().year\n m = datetime.datetime.now().month\n d = datetime.datetime.now().day\n\n y = str(y-1911)\n m = str(m) if m > 9 else '0' + str(m)\n d = str(d) if d > 9 else '0' + str(d)\n\n datestr = '%s/%s/%s' % (y,m,d)\n res4 = requests_get(\"https://www.tpex.org.tw/web/stock/exright/revivt/revivt_result.php?l=zh-tw&d=102/01/01&ed=\"+datestr+\"&_=1551611342446\")\n\n df = pd.DataFrame(json.loads(res4.text)['aaData'])\n\n name = ['恢復買賣日期', '股票代號', '股票名稱', '最後交易之收盤價格',\n '減資恢復買賣開始日參考價格', '漲停價格', '跌停價格', '開始交易基準價', '除權參考價', '減資源因', '詳細資料']\n\n float_name_list = ['最後交易之收盤價格', '減資恢復買賣開始日參考價格', '漲停價格', '跌停價格', '開始交易基準價', '除權參考價']\n df.columns = name\n df[float_name_list] = df[float_name_list].astype(str).apply(lambda s:s.str.replace(',', '')).astype(float)\n df['stock_id'] = df['股票代號'] + ' ' + df['股票名稱']\n dates = (df['恢復買賣日期'].astype(str).str[:-4].astype(int) + 1911).astype(str) + df['恢復買賣日期'].astype(str).str[-4:]\n df['date'] = pd.to_datetime(dates)\n df['date'] = pd.to_datetime(dates, errors='coerce')\n\n df['otc_cap_divide_ratio'] = df['最後交易之收盤價格'] / df['開始交易基準價']\n\n return df.set_index(['stock_id', 'date'])\n\n\n\n\no2tp = {'成交股數':'成交股數',\n '成交筆數':'成交筆數',\n '成交金額(元)':'成交金額',\n '收盤':'收盤價',\n '開盤':'開盤價',\n '最低':'最低價',\n '最高':'最高價',\n '最後買價':'最後揭示買價',\n '最後賣價':'最後揭示賣價',\n }\n\no2tpe = {\n '殖利率(%)':'殖利率(%)',\n '本益比':'本益比',\n '每股股利':'股利年度',\n '股價淨值比':'股價淨值比',\n}\n\no2tb = {\n '外資及陸資(不含外資自營商)-買進股數':'外陸資買進股數(不含外資自營商)',\n '外資及陸資買股數': '外陸資買進股數(不含外資自營商)',\n \n '外資及陸資(不含外資自營商)-賣出股數':'外陸資賣出股數(不含外資自營商)',\n '外資及陸資賣股數': '外陸資賣出股數(不含外資自營商)',\n \n '外資及陸資(不含外資自營商)-買賣超股數':'外陸資買賣超股數(不含外資自營商)',\n '外資及陸資淨買股數': '外陸資買賣超股數(不含外資自營商)',\n \n '外資自營商-買進股數':'外資自營商買進股數',\n 
'外資自營商-賣出股數':'外資自營商賣出股數',\n '外資自營商-買賣超股數':'外資自營商買賣超股數',\n '投信-買進股數':'投信買進股數',\n '投信買進股數': '投信買進股數',\n '投信-賣出股數': '投信賣出股數',\n '投信賣股數': '投信賣出股數',\n \n '投信-買賣超股數':'投信買賣超股數',\n '投信淨買股數': '投信買賣超股數',\n \n '自營商(自行買賣)-買進股數':'自營商買進股數(自行買賣)',\n '自營商(自行買賣)買股數':'自營商買進股數(自行買賣)',\n \n '自營商(自行買賣)-賣出股數':'自營商賣出股數(自行買賣)',\n '自營商(自行買賣)賣股數':'自營商賣出股數(自行買賣)',\n \n '自營商(自行買賣)-買賣超股數': '自營商買賣超股數(自行買賣)',\n '自營商(自行買賣)淨買股數': '自營商買賣超股數(自行買賣)',\n \n '自營商(避險)-買進股數':'自營商買進股數(避險)',\n '自營商(避險)買股數': '自營商買進股數(避險)',\n '自營商(避險)-賣出股數':'自營商賣出股數(避險)',\n '自營商(避險)賣股數': '自營商賣出股數(避險)',\n '自營商(避險)-買賣超股數': '自營商買賣超股數(避險)',\n '自營商(避險)淨買股數': '自營商買賣超股數(避險)',\n \n}\n\no2tm = {n:n for n in ['當月營收', '上月營收', '去年當月營收', '上月比較增減(%)', '去年同月增減(%)', '當月累計營收', '去年累計營收',\n '前期比較增減(%)']}\n\ndef merge(twe, otc, t2o):\n t2o2 = {k:v for k,v in t2o.items() if k in otc.columns}\n otc = otc[list(t2o2.keys())]\n otc = otc.rename(columns=t2o2)\n twe = twe[otc.columns & twe.columns]\n\n return twe.append(otc)\n\n\ndef crawl_price(date):\n dftwe = price_twe(date)\n time.sleep(5)\n dfotc = price_otc(date)\n if len(dftwe) != 0 and len(dfotc) != 0:\n df = merge(dftwe, dfotc, o2tp)\n return df\n else:\n return pd.DataFrame()\n\n\ndef crawl_bargin(date):\n dftwe = bargin_twe(date)\n dfotc = bargin_otc(date)\n if len(dftwe) != 0 and len(dfotc) != 0:\n return merge(dftwe, dfotc, o2tb)\n else:\n return pd.DataFrame()\n\n\ndef crawl_monthly_report(date):\n dftwe = month_revenue('sii', date)\n time.sleep(5)\n dfotc = month_revenue('otc', date)\n if len(dftwe) != 0 and len(dfotc) != 0:\n return merge(dftwe, dfotc, o2tm)\n else:\n return pd.DataFrame()\n\ndef crawl_pe(date):\n\n dftwe = pe_twe(date)\n dfotc = pe_otc(date)\n if len(dftwe) != 0 and len(dfotc) != 0:\n return merge(dftwe, dfotc, o2tpe)\n else:\n return pd.DataFrame()\n\nout = widgets.Output(layout={'border': '1px solid black'})\n\[email protected]()\ndef update_table(table_name, crawl_function, dates):\n\n if dates:\n if len(dates) == 0:\n print(\"該時間段沒有可以爬取之資料\")\n return\n print('start crawl ' + table_name + ' from ', dates[0] , 'to', dates[-1])\n else:\n print('起始、結束日期有點怪怪的,請重新選擇一下喔')\n return\n \n\n df = pd.DataFrame()\n dfs = {}\n\n progress = tqdm_notebook(dates, )\n\n for d in progress:\n\n print('crawling', d)\n progress.set_description('crawl' + table_name + str(d))\n\n data = crawl_function(d)\n\n if data is None or len(data) == 0:\n print('fail, check if ' + str(d) + ' is a holiday')\n\n # update multiple dataframes\n elif isinstance(data, dict):\n if len(dfs) == 0:\n dfs = {i:pd.DataFrame() for i in data.keys()}\n\n for i, d in data.items():\n dfs[i] = dfs[i].append(d)\n\n # update single dataframe\n else:\n df = df.append(data)\n print('success')\n\n time.sleep(5)\n\n\n\n if df is not None and len(df) != 0:\n to_pickle(df, table_name)\n\n if len(dfs) != 0:\n for i, d in dfs.items():\n print('saveing df', d.head(), len(d))\n if len(d) != 0:\n print('save df', d.head())\n to_pickle(df, table_name)\n \nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\ndef check_monthly_revenue():\n \n df = pd.read_pickle(\"history/tables/monthly_report.pkl\")\n \n if df.loc['1101 台泥', '2017-10-10']['當月營收'] == '8387381':\n print(\"fix monthly report errors\")\n df = df.reset_index()\n df['date'] = [d + relativedelta(months=1) for d in df['date']]\n df.set_index(['stock_id', 'date'], inplace=True)\n df.to_pickle(\"history/tables/monthly_report.pkl\")\n print(\"done\")\n commit(\"monthlu_report\")\n\nimport pickle\ndef to_pickle(df, name):\n \n if not os.path.isdir('history'):\n os.mkdir('history')\n\n 
if not os.path.isdir(os.path.join('history', 'tables')):\n os.mkdir(os.path.join('history', 'tables'))\n\n\n fname = os.path.join('history', 'tables', name + '.pkl')\n newfname = os.path.join('history', 'tables', 'new' + name + '.pkl')\n \n # refine patch for monthly revenue\n \n if name == 'monthly_report' :\n check_monthly_revenue()\n\n if os.path.isfile(fname):\n old_df = pd.read_pickle(fname)\n old_df = old_df.append(df, sort=False)\n\n old_df = old_df[~old_df.index.duplicated(keep='last')]\n old_df = old_df.sort_index()\n old_df.to_pickle(newfname)\n os.remove(fname)\n os.rename(newfname, fname)\n else:\n df = df[~df.index.duplicated(keep='last')]\n df.to_pickle(fname)\n old_df = df\n \n if not os.path.isfile(date_range_record_file):\n pickle.dump({}, open(date_range_record_file, 'wb'))\n\n dates = pickle.load(open(date_range_record_file, 'rb'))\n dates[name] = (old_df.index.levels[1][0], old_df.index.levels[1][-1])\n pickle.dump(dates, open(date_range_record_file, 'wb'))\n \n commit(name)\n\n\nfrom datetime import date\nfrom dateutil.rrule import rrule, DAILY, MONTHLY\n\ndef date_range(start_date, end_date):\n return [dt.date() for dt in rrule(DAILY, dtstart=start_date, until=end_date)]\n\ndef month_range(start_date, end_date):\n return [dt.date() for dt in rrule(MONTHLY, dtstart=start_date, until=end_date)]\n\ndef season_range(start_date, end_date):\n\n if isinstance(start_date, datetime.datetime):\n start_date = start_date.date()\n\n if isinstance(end_date, datetime.datetime):\n end_date = end_date.date()\n\n ret = []\n for year in range(start_date.year-1, end_date.year+1):\n ret += [ datetime.date(year, 5, 15),\n datetime.date(year, 8, 14),\n datetime.date(year, 11, 14),\n datetime.date(year+1, 3, 31)]\n ret = [r for r in ret if start_date < r < end_date]\n\n return ret\n\nimport ipywidgets as widgets\nfrom IPython.display import display\n\ndef table_date_range(table_name):\n if os.path.isfile(date_range_record_file):\n with open(date_range_record_file, 'rb') as f:\n dates = pickle.load(f)\n if table_name in dates:\n return dates[table_name]\n else:\n return [None, None]\n else:\n return [None, None]\n\nfrom inspect import signature\n\n\ndef widget(table_name, crawl_func, range_date=None):\n\n\n sig = signature(crawl_func)\n\n if len(sig.parameters) == 0:\n @out.capture()\n def onupdate(x):\n print('updating ', table_name)\n df = crawl_func()\n to_pickle(df, table_name)\n print('done')\n\n btn = widgets.Button(description='update ')\n btn.on_click(onupdate)\n\n first_date, last_date = table_date_range(table_name)\n label = widgets.Label(table_name + ' | ' + str(first_date) + ' ~ ' + str(last_date))\n items = [btn]\n display(widgets.VBox([label, widgets.HBox(items)]))\n\n else:\n\n date_picker_from = widgets.DatePicker(\n description='from',\n disabled=False,\n )\n\n first_date, last_date = table_date_range(table_name)\n\n if last_date:\n date_picker_from.value = last_date\n\n date_picker_to = widgets.DatePicker(\n description='to',\n disabled=False,\n )\n\n date_picker_to.value = datetime.datetime.now().date()\n\n btn = widgets.Button(description='update ')\n\n def onupdate(x):\n dates = range_date(date_picker_from.value, date_picker_to.value)\n\n if len(dates) == 0:\n print('no data to parse')\n\n update_table(table_name, crawl_func, dates)\n\n btn.on_click(onupdate)\n\n\n label = widgets.Label(table_name + ' | ' + str(first_date) + ' ~ ' + str(last_date))\n\n items = [date_picker_from, date_picker_to, btn]\n display(widgets.VBox([label, widgets.HBox(items)]))\n\nimport 
requests\nfrom io import StringIO\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm_notebook as tqdm\nimport os\nimport pickle\nimport datetime\nimport random\n\ndef afterIFRS(year, season):\n season2date = [ datetime.datetime(year, 5, 15),\n datetime.datetime(year, 8, 14),\n datetime.datetime(year, 11, 14),\n datetime.datetime(year+1, 3, 31)]\n\n return pd.to_datetime(season2date[season-1].date())\n\ndef clean(year, season, balance_sheet):\n\n if len(balance_sheet) == 0:\n print('**WARRN: no data to parse')\n return balance_sheet\n balance_sheet = balance_sheet.transpose().reset_index().rename(columns={'index':'stock_id'})\n\n\n if '會計項目' in balance_sheet:\n s = balance_sheet['會計項目']\n balance_sheet = balance_sheet.drop('會計項目', axis=1).apply(pd.to_numeric)\n balance_sheet['會計項目'] = s.astype(str)\n\n balance_sheet['date'] = afterIFRS(year, season)\n\n balance_sheet['stock_id'] = balance_sheet['stock_id'].astype(str)\n balance = balance_sheet.set_index(['stock_id', 'date'])\n return balance\n\ndef download_html(year, season, stock_ids, report_type='C'):\n\n directory = os.path.join('history', 'financial_statement', str(year) + str(season))\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n files = [os.path.join(directory, str(i) + '.html') for i in stock_ids]\n pbar = tqdm([sid for file, sid in zip(files, stock_ids) if not os.path.exists(file) or os.stat(file).st_size < 10000])\n\n for sid in pbar:\n\n pbar.set_description('downloading stock %s in report type %s' % (sid, report_type))\n\n file = os.path.join(directory, str(sid) + '.html')\n\n # start parsing\n if int(year) >= 2013:\n url = ('https://mops.twse.com.tw/server-java/t164sb01?step=1&CO_ID=' + str(sid) + '&SYEAR=' + str(year) + '&SSEASON='+str(season)+'&REPORT_ID=' + str(report_type))\n else:\n url = ('https://mops.twse.com.tw/server-java/t147sb02?t203sb01Form=t203sb01Form&step=0&comp_id='+str(sid)+'&YEAR1='+str(year)+'&SEASON1='+str(season)+'&R_TYPE1=B')\n\n try:\n r = requests_get(url, headers=headers)\n except:\n print('**WARRN: requests cannot get stock:')\n print(url)\n continue\n\n r.encoding = 'big5'\n\n # write files\n f = open(file, 'w', encoding='utf-8')\n\n f.write('<meta charset=\"UTF-8\">\\n')\n f.write(r.text)\n f.close()\n\n # finish\n # print(percentage, i, 'end')\n\n # sleep a while\n time.sleep(random.uniform(0, 3))\n\nimport requests\nimport os\nimport time\nimport requests\nimport datetime\nimport random\nimport requests\nimport io\nimport shutil\nimport zipfile\nimport sys\nimport urllib.request\ndef crawl_finance_statement2019(year, season):\n\n def ifrs_url(year, season):\n url = \"https://mops.twse.com.tw/server-java/FileDownLoad?step=9&fileName=tifrs-\"+str(year)+\"Q\"+str(season)\\\n +\".zip&filePath=/home/html/nas/ifrs/\"+str(year)+\"/\"\n print(url)\n return url\n\n\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n from tqdm import tqdm\n class DownloadProgressBar(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n\n def download_url(url, output_path):\n with DownloadProgressBar(unit='B', unit_scale=True,\n miniters=1, desc=url.split('/')[-1]) as t:\n urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)\n\n def 
download_file(url, filename):\n \"\"\"\n Helper method handling downloading large files from `url` to `filename`. Returns a pointer to `filename`.\n \"\"\"\n chunkSize = 1024\n r = requests.get(url, stream=True, verify=True)\n with open(filename, 'wb') as f:\n # pbar = tqdm( unit=\"B\", total=int( r.headers['content-length'] ) )\n for chunk in r.iter_content(chunk_size=chunkSize): \n if chunk: # filter out keep-alive new chunks\n # pbar.update (len(chunk))\n f.write(chunk)\n return r\n\n def ifrs_url(year, season):\n url = \"https://mops.twse.com.tw/server-java/FileDownLoad?step=9&fileName=tifrs-\"+str(year)+\"Q\"+str(season)\\\n +\".zip&filePath=/home/html/nas/ifrs/\"+str(year)+\"/\"\n print(url)\n return url\n\n url = ifrs_url(year,season)\n print('start download')\n download_file(url, 'temp.zip')\n print('finished!')\n \n\n path = os.path.join('history', 'financial_statement', str(year) + str(season))\n\n if os.path.isdir(path):\n shutil.rmtree(path)\n\n print('create new dir')\n\n zipfiles = zipfile.ZipFile(open('temp.zip', 'rb'))\n zipfiles.extractall(path=path)\n\n print('extract all files')\n\n fnames = [f for f in os.listdir(path) if f[-5:] == '.html']\n fnames = sorted(fnames)\n\n newfnames = [f.split(\"-\")[5] + '.html' for f in fnames]\n\n for fold, fnew in zip(fnames, newfnames):\n if len(fnew) != 9:\n print('remove strange code id', fnew)\n os.remove(os.path.join(path, fold))\n continue\n \n if not os.path.exists(os.path.join(path, fnew)):\n os.rename(os.path.join(path, fold), os.path.join(path, fnew))\n else:\n os.remove(os.path.join(path, fold))\n\n\ndef crawl_finance_statement(year, season, stock_ids):\n\n directory = os.path.join('history', 'financial_statement', str(year) + str(season))\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if year >= 2013:\n download_html(year, season, stock_ids, 'C')\n download_html(year, season, stock_ids, 'B')\n download_html(year, season, stock_ids, 'A')\n\ndef remove_english(s):\n result = re.sub(r'[a-zA-Z()]', \"\", s)\n return result\n\ndef patch2019(df):\n df = df.copy()\n dfname = df.columns.levels[0][0]\n\n df = df.iloc[:,1:].rename(columns={'會計項目Accounting Title':'會計項目'})\n\n\n refined_name = df[(dfname,'會計項目')].str.split(\" \").str[0].str.replace(\" \", \"\").apply(remove_english)\n\n subdf = df[dfname].copy()\n subdf['會計項目'] = refined_name\n df[dfname] = subdf\n\n df.columns = pd.MultiIndex(levels=[df.columns.levels[1], df.columns.levels[0]],codes=[df.columns.codes[1], df.columns.codes[0]])\n\n def neg(s):\n\n if isinstance(s, float):\n return s\n\n if str(s) == 'nan':\n return np.nan\n\n s = s.replace(\",\", \"\")\n if s[0] == '(':\n return -float(s[1:-1])\n else:\n return float(s)\n\n df.iloc[:,1:] = df.iloc[:,1:].applymap(neg)\n return df\n\ndef read_html2019(file):\n dfs = pd.read_html(file)\n return [pd.DataFrame(), patch2019(dfs[0]), patch2019(dfs[1]), patch2019(dfs[2])]\n\n\nimport re\ndef pack_htmls(year, season, directory):\n balance_sheet = {}\n income_sheet = {}\n cash_flows = {}\n income_sheet_cumulate = {}\n pbar = tqdm(os.listdir(directory))\n\n for i in pbar:\n\n # 將檔案路徑建立好\n file = os.path.join(directory, i)\n\n # 假如檔案不是html結尾,或是太小,代表不是正常的檔案,略過\n if file[-4:] != 'html' or os.stat(file).st_size < 10000:\n continue\n\n # 顯示目前運行的狀況\n stock_id = i.split('.')[0]\n pbar.set_description('parse htmls %d season %d stock %s' % (year, season, stock_id))\n\n # 讀取html\n if year < 2019:\n dfs = pd.read_html(file)\n else:\n try:\n dfs = read_html2019(file)\n except:\n print(\"**ERROR: fail to parse \", file)\n 
continue\n\n # 處理pandas0.24.1以上,會把columns parse好的問題\n for df in dfs:\n if 'levels' in dir(df.columns):\n df.columns = list(range(df.values.shape[1]))\n\n # 假如html不完整,則略過\n if len(dfs) < 4:\n print('**WARRN html file broken', year, season, i)\n continue\n \n if year <= 2012:\n df = dfs[1]\n category = (df[0] == '會計科目').cumsum()\n df[category == 1]\n dfs = {\n 1: df[category == 0],\n 2: df[category == 1],\n 3: df[category == 2],\n }\n\n # 取得 balance sheet\n df = dfs[1].copy().drop_duplicates(subset=0, keep='last')\n df = df.set_index(0)\n balance_sheet[stock_id] = df[1].dropna()\n #balance_sheet = combine(balance_sheet, df[1].dropna(), stock_id)\n\n # 取得 income statement\n df = dfs[2].copy().drop_duplicates(subset=0, keep='last')\n df = df.set_index(0)\n\n # 假如有4個columns,則第1與第3條column是單季跟累計的income statement\n if len(df.columns) == 4:\n income_sheet[stock_id] = df[1].dropna()\n income_sheet_cumulate[stock_id] = df[3].dropna()\n # 假如有2個columns,則代表第3條column為累計的income statement,單季的從缺\n elif len(df.columns) == 2:\n income_sheet_cumulate[stock_id] = df[1].dropna()\n\n # 假如是第一季財報 累計 跟單季 的數值是一樣的\n if season == 1:\n income_sheet[stock_id] = df[1].dropna()\n\n # 取得 cash_flows\n df = dfs[3].copy().drop_duplicates(subset=0, keep='last')\n df = df.set_index(0)\n cash_flows[stock_id] = df[1].dropna()\n\n # 將dictionary整理成dataframe\n balance_sheet = pd.DataFrame(balance_sheet)\n income_sheet = pd.DataFrame(income_sheet)\n income_sheet_cumulate = pd.DataFrame(income_sheet_cumulate)\n cash_flows = pd.DataFrame(cash_flows)\n\n print('balance_sheet', balance_sheet.shape)\n print('income_sheet', income_sheet.shape)\n print('cumulate_income_sheet', income_sheet_cumulate.shape)\n print('cash_flows', cash_flows.shape)\n\n # 做清理\n ret = {'balance_sheet':clean(year, season, balance_sheet), 'income_sheet':clean(year, season, income_sheet),\n 'income_sheet_cumulate':clean(year, season, income_sheet_cumulate), 'cash_flows':clean(year, season, cash_flows)}\n\n # 假如是第一季的話,則 單季 跟 累計 是一樣的\n if season == 1:\n ret['income_sheet'] = ret['income_sheet_cumulate'].copy()\n\n ret['income_sheet_cumulate'].columns = '累計' + ret['income_sheet_cumulate'].columns\n\n pickle.dump(ret, open(os.path.join('history', 'financial_statement', 'pack' + str(year) + str(season) + '.pickle'), 'wb'))\n\n return ret\n\ndef get_all_pickles(directory):\n ret = {}\n for i in os.listdir(directory):\n if i[:4] != 'pack':\n continue\n ret[i[4:9]] = pd.read_pickle(os.path.join(directory, i))\n return ret\n\ndef combine(d):\n\n tnames = ['balance_sheet',\n 'cash_flows',\n 'income_sheet',\n 'income_sheet_cumulate']\n\n tbs = {t:pd.DataFrame() for t in tnames}\n\n for i, dfs in d.items():\n for tname in tnames:\n tbs[tname] = tbs[tname].append(dfs[tname])\n return tbs\n\n\ndef fill_season4(tbs):\n # copy income sheet (will modify it later)\n income_sheet = tbs['income_sheet'].copy()\n\n # calculate the overlap columns\n c1 = set(tbs['income_sheet'].columns)\n c2 = set(tbs['income_sheet_cumulate'].columns)\n\n overlap_columns = []\n for i in c1:\n if '累計' + i in c2:\n overlap_columns.append('累計' + i)\n\n # get all years\n years = set(tbs['income_sheet_cumulate'].index.levels[1].year)\n\n for y in years:\n\n # get rows of the dataframe that is season 4\n ys = tbs['income_sheet_cumulate'].reset_index('stock_id').index.year == y\n ds4 = tbs['income_sheet_cumulate'].reset_index('stock_id').index.month == 3\n df4 = tbs['income_sheet_cumulate'][ds4 & ys].apply(lambda s: pd.to_numeric(s, errors='coerce')).reset_index('date')\n\n # get rows of the dataframe that is 
season 3\n yps = tbs['income_sheet_cumulate'].reset_index('stock_id').index.year == y - 1\n ds3 = tbs['income_sheet_cumulate'].reset_index('stock_id').index.month == 11\n df3 = tbs['income_sheet_cumulate'][ds3 & yps].apply(lambda s: pd.to_numeric(s, errors='coerce')).reset_index('date')\n \n if len(df3) == 0:\n print('skip ', y)\n continue\n # calculate the differences of income_sheet_cumulate to get income_sheet single season\n diff = df4 - df3\n diff = diff.drop(['date'], axis=1)[overlap_columns]\n\n # remove 累計\n diff.columns = diff.columns.str[2:]\n\n # 加上第四季的日期\n diff['date'] = pd.to_datetime(str(y) + '-03-31')\n diff = diff[list(c1) + ['date']].reset_index().set_index(['stock_id','date'])\n\n # 新增資料於income_sheet尾部\n income_sheet = income_sheet.append(diff)\n\n # 排序好並更新tbs\n income_sheet = income_sheet.reset_index().sort_values(['stock_id', 'date']).set_index(['stock_id', 'date'])\n tbs['income_sheet'] = income_sheet\n\ndef to_db(tbs):\n\n for i, df in tbs.items():\n df = df.reset_index().sort_values(['stock_id', 'date']).drop_duplicates(['stock_id', 'date']).set_index(['stock_id', 'date'])\n df.to_pickle(os.path.join('history', 'tables', i + '.pkl'))\n\n if not os.path.isfile(date_range_record_file):\n pickle.dump({}, open(date_range_record_file, 'wb'))\n\n dates = pickle.load(open(date_range_record_file, 'rb'))\n dates['financial_statement'] = (df.index.levels[1][0], df.index.levels[1][-1])\n pickle.dump(dates, open(date_range_record_file, 'wb'))\n\n\ndef html2db(year, season):\n\n pack_htmls(year, season, os.path.join('history', 'financial_statement', str(year) + str(season)))\n d = get_all_pickles(os.path.join('history', 'financial_statement'))\n tbs = combine(d)\n fill_season4(tbs)\n to_db(tbs)\n return {}\n\ndef crawl_finance_statement_by_date(date):\n year = date.year\n if date.month == 3:\n season = 4\n year = year - 1\n month = 11\n elif date.month == 5:\n season = 1\n month = 2\n elif date.month == 8:\n season = 2\n month = 5\n elif date.month == 11:\n season = 3\n month = 8\n else:\n return None\n\n if year < 2019:\n df = crawl_monthly_report(datetime.datetime(year, month, 1))\n crawl_finance_statement(year, season, df.index.levels[0].str.split(' ').str[0])\n else:\n crawl_finance_statement2019(year, season)\n\n html2db(year, season)\n commit()\n return {}\n\n\nimport os\nimport gc\nimport shutil\nimport pandas as pd\nimport numpy as np\n\ndef commit(*commit_tables):\n \n ftables = os.path.join('history', 'tables')\n fitems = os.path.join('history', 'items')\n\n fnames = [os.path.join(ftables, f) for f in os.listdir(ftables)]\n tnames = [f[:-4] for f in os.listdir(ftables)]\n \n if len(commit_tables) == 0:\n commit_tables = tnames\n\n for fname, tname in zip(fnames, tnames):\n \n if tname not in commit_tables:\n continue\n \n if fname[-4:] != '.pkl':\n continue\n\n fdir = os.path.join(fitems, tname)\n \n if os.path.isdir(fdir) and os.path.getmtime(fname) < os.path.getmtime(fdir):\n print(\"已經成功commit過\", tname, \"了,跳過!\")\n continue\n \n if os.path.isdir(fdir):\n shutil.rmtree(fdir)\n os.mkdir(fdir)\n else:\n os.mkdir(fdir)\n \n try:\n df = pd.read_pickle(fname)\n except:\n print(\"**檔案過大,無法成功commit\", fname)\n continue\n\n # remove stock name\n df.reset_index(inplace=True)\n if sum(df['stock_id'].str.find(' ') >= 0) > 0:\n cond = df.stock_id.str[4] == ' '\n df = df[cond]\n gc.collect()\n new_sid = df['stock_id'].str[:4]\n df['stock_id'] = new_sid\n \n df.set_index(['stock_id', 'date'], inplace=True)\n\n # select 4 digit stock ids\n if tname == 'price':\n sids = 
df.index.get_level_values(0)\n df = df[sids.str.len()==4]\n gc.collect()\n\n if tname == 'monthly_report':\n check_monthly_revenue()\n\n df = df.apply(lambda s: pd.to_numeric(s, errors='coerce'))\n gc.collect()\n\n df[df == 0] = np.nan\n\n\n df = df[~df.index.duplicated(keep='first')]\n gc.collect()\n \n items = list(df.columns)\n df.reset_index(inplace=True)\n \n df = df.pivot(\"date\", \"stock_id\")\n gc.collect()\n\n for name, (_, series) in zip(items, df.items()):\n\n print(tname, '--', name)\n fitem = os.path.join(fdir, name.replace('+', '_').replace('/', '_'))\n #series.reset_index()\\\n # .pivot(\"date\", \"stock_id\")[name].to_pickle(fitem + '.pkl')\n df[name].to_pickle(fitem + '.pkl')" ]
[ [ "pandas.to_datetime", "pandas.read_pickle", "pandas.DataFrame", "pandas.to_numeric", "pandas.MultiIndex", "pandas.concat", "pandas.Series", "pandas.read_html" ] ]
abhishekkumkar/dockrized-neural-photo-editor-using-GAN
[ "d234cf1f80cf8c8f621f871dc704dc43e212201f" ]
[ "ML/discgen_utils.py" ]
[ "# Plot Image Grid function imported from Discriminative Regularization for Generative Models by Lamb et al:\n# https://github.com/vdumoulin/discgen\nimport six\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import cm, pyplot\nfrom mpl_toolkits.axes_grid1 import ImageGrid\n\n\n\ndef plot_image_grid(images, num_rows, num_cols, save_path=None):\n \"\"\"Plots images in a grid.\n\n Parameters\n ----------\n images : numpy.ndarray\n Images to display, with shape\n ``(num_rows * num_cols, num_channels, height, width)``.\n num_rows : int\n Number of rows for the image grid.\n num_cols : int\n Number of columns for the image grid.\n save_path : str, optional\n Where to save the image grid. Defaults to ``None``,\n which causes the grid to be displayed on screen.\n\n \"\"\"\n figure = pyplot.figure()\n grid = ImageGrid(figure, 111, (num_rows, num_cols), axes_pad=0.1)\n\n for image, axis in zip(images, grid):\n axis.imshow(image.transpose(1, 2, 0), interpolation='nearest')\n axis.set_yticklabels(['' for _ in range(image.shape[1])])\n axis.set_xticklabels(['' for _ in range(image.shape[2])])\n axis.axis('off')\n\n if save_path is None:\n pyplot.show()\n else:\n pyplot.savefig(save_path, transparent=True, bbox_inches='tight',dpi=212)\n pyplot.close()" ]
[ [ "matplotlib.use", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.show" ] ]
bainro/loss_landscape
[ "30bdd84d6946facee973151128bf0ea108c12ca1" ]
[ "plot_surface.py" ]
[ "\"\"\"\n Calculate and visualize the loss surface.\n Usage example:\n >> python plot_surface.py --x=-1:1:101 --y=-1:1:101 --model resnet56 --cuda\n\"\"\"\nimport argparse\nimport copy\nimport h5py\nimport torch\nimport time\nimport socket\nimport os\nimport sys\nimport numpy as np\nimport torchvision\nimport torch.nn as nn\nimport dataloader\nimport evaluation\nimport projection as proj\nimport net_plotter\nimport plot_2D\nimport plot_1D\nimport model_loader\nimport scheduler\nimport mpi4pytorch as mpi\n\ndef name_surface_file(args, dir_file):\n # skip if surf_file is specified in args\n if args.surf_file:\n return args.surf_file\n\n # use args.dir_file as the perfix\n surf_file = dir_file\n\n # resolution\n surf_file += '_x[%s,%s,%d]' % (str(args.xmin), str(args.xmax), int(args.xnum))\n if args.y:\n surf_file += 'y[%s,%s,%d]' % (str(args.ymin), str(args.ymax), int(args.ynum))\n if args.z:\n surf_file += 'z[%s,%s,%d]' % (str(args.zmin), str(args.zmax), int(args.znum))\n if args.t:\n surf_file += 't[%s,%s,%d]' % (str(args.tmin), str(args.tmax), int(args.tnum))\n\n # dataloder parameters\n if args.raw_data: # without data normalization\n surf_file += '_rawdata'\n if args.data_split > 1:\n surf_file += '_datasplit=' + str(args.data_split) + '_splitidx=' + str(args.split_idx)\n\n return surf_file + \".h5\"\n\n\ndef setup_surface_file(args, surf_file, dir_file):\n # skip if the direction file already exists\n if os.path.exists(surf_file):\n f = h5py.File(surf_file, 'r')\n if (args.y and 'ycoordinates' in f.keys()) or 'xcoordinates' in f.keys():\n f.close()\n print (\"%s is already set up\" % surf_file)\n return\n\n f = h5py.File(surf_file, 'a')\n f['dir_file'] = dir_file\n\n # Create the coordinates(resolutions) at which the function is evaluated\n xcoordinates = np.linspace(int(args.xmin), int(args.xmax), num=int(args.xnum))\n f['xcoordinates'] = xcoordinates\n\n if args.y:\n ycoordinates = np.linspace(int(args.ymin), int(args.ymax), num=int(args.ynum))\n f['ycoordinates'] = ycoordinates\n\n if args.z:\n zcoordinates = np.linspace(int(args.zmin), int(args.zmax), num=int(args.znum))\n f['zcoordinates'] = zcoordinates\n\n if args.t:\n tcoordinates = np.linspace(int(args.tmin), int(args.tmax), num=int(args.tnum))\n f['tcoordinates'] = tcoordinates\n\n f.close()\n\n return surf_file\n\n\ndef crunch(surf_file, net, w, s, d, dataloader, loss_key, acc_key, comm, rank, args):\n \"\"\"\n Calculate the loss values and accuracies of modified models in parallel\n using MPI reduce.\n \"\"\"\n\n #print(surf_file,234)\n f = h5py.File(surf_file, 'r+' if rank == 0 else 'r')\n losses, accuracies = [], []\n xcoordinates = f['xcoordinates'][:]\n ycoordinates = f['ycoordinates'][:] if 'ycoordinates' in f.keys() else None\n zcoordinates = f['zcoordinates'][:] if 'zcoordinates' in f.keys() else None\n tcoordinates = f['tcoordinates'][:] if 'tcoordinates' in f.keys() else None\n\n if loss_key not in f.keys():\n shape = xcoordinates.shape if ycoordinates is None else (len(xcoordinates),len(ycoordinates))\n if ycoordinates is not None:\n if zcoordinates is not None:\n if tcoordinates is not None:\n shape = (len(xcoordinates),len(ycoordinates),len(zcoordinates),len(tcoordinates))\n else: \n shape = (len(xcoordinates),len(ycoordinates),len(zcoordinates))\n else:\n shape = (len(xcoordinates),len(ycoordinates))\n else:\n shape = xcoordinates.shape\n losses = -np.ones(shape=shape)\n accuracies = -np.ones(shape=shape)\n if rank == 0:\n f[loss_key] = losses\n f[acc_key] = accuracies\n else:\n losses = f[loss_key][:]\n 
accuracies = f[acc_key][:]\n\n # Generate a list of indices of 'losses' that need to be filled in.\n # The coordinates of each unfilled index (with respect to the direction vectors\n # stored in 'd') are stored in 'coords'.\n inds, coords, inds_nums = scheduler.get_job_indices(losses, xcoordinates, ycoordinates, zcoordinates, tcoordinates, comm)\n\n print('Computing %d values for rank %d'% (len(inds), rank))\n start_time = time.time()\n total_sync = 0.0\n\n criterion = nn.CrossEntropyLoss()\n if args.loss_name == 'mse':\n criterion = nn.MSELoss()\n\n # Loop over all uncalculated loss values\n for count, ind in enumerate(inds):\n # Get the coordinates of the loss value being calculated\n coord = coords[count]\n\n # Load the weights corresponding to those coordinates into the net\n if args.dir_type == 'weights':\n net_plotter.set_weights(net.module if args.ngpu > 1 else net, w, d, coord)\n elif args.dir_type == 'states':\n net_plotter.set_states(net.module if args.ngpu > 1 else net, s, d, coord)\n\n # Record the time to compute the loss value\n loss_start = time.time()\n loss, acc = evaluation.eval_loss(net, criterion, dataloader, args.cuda)\n loss_compute_time = time.time() - loss_start\n\n # Record the result in the local array\n losses.ravel()[ind] = loss\n accuracies.ravel()[ind] = acc\n\n # Send updated plot data to the master node\n syc_start = time.time()\n losses = mpi.reduce_max(comm, losses)\n accuracies = mpi.reduce_max(comm, accuracies)\n syc_time = time.time() - syc_start\n total_sync += syc_time\n\n # Only the master node writes to the file - this avoids write conflicts\n if rank == 0:\n f[loss_key][:] = losses\n f[acc_key][:] = accuracies\n f.flush()\n\n print('Evaluating rank %d %d/%d (%.1f%%) coord=%s \\t%s= %.3f \\t%s=%.2f \\ttime=%.2f \\tsync=%.2f' % (\n rank, count, len(inds), 100.0 * count/len(inds), str(coord), loss_key, loss,\n acc_key, acc, loss_compute_time, syc_time))\n\n # This is only needed to make MPI run smoothly. If this process has less work than\n # the rank0 process, then we need to keep calling reduce so the rank0 process doesn't block\n for i in range(max(inds_nums) - len(inds)):\n losses = mpi.reduce_max(comm, losses)\n accuracies = mpi.reduce_max(comm, accuracies)\n\n total_time = time.time() - start_time\n print('Rank %d done! 
Total time: %.2f Sync: %.2f' % (rank, total_time, total_sync))\n\n f.close()\n\n###############################################################\n# MAIN\n###############################################################\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='plotting loss surface')\n parser.add_argument('--mpi', '-m', action='store_true', help='use mpi')\n parser.add_argument('--cuda', '-c', action='store_true', help='use cuda')\n parser.add_argument('--threads', default=2, type=int, help='number of threads')\n parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use for each rank, useful for data parallel evaluation')\n parser.add_argument('--batch_size', default=128, type=int, help='minibatch size')\n\n # data parameters\n parser.add_argument('--dataset', default='cifar10', help='cifar10 | imagenet')\n parser.add_argument('--datapath', default='cifar10/data', metavar='DIR', help='path to the dataset')\n parser.add_argument('--raw_data', action='store_true', default=False, help='no data preprocessing')\n parser.add_argument('--data_split', default=1, type=int, help='the number of splits for the dataloader')\n parser.add_argument('--split_idx', default=0, type=int, help='the index of data splits for the dataloader')\n parser.add_argument('--trainloader', default='', help='path to the dataloader with random labels')\n parser.add_argument('--test_loader', default='', help='path to the test_loader with random labels')\n parser.add_argument('--eval_count', default=None, type=int, help='the number of test examples to evaluate the avg loss.')\n\n # model parameters\n parser.add_argument('--model', default='resnet56', help='model name')\n parser.add_argument('--model_folder', default='', help='the common folder that contains model_file and model_file2')\n parser.add_argument('--model_file', default='', help='path to the trained model file')\n parser.add_argument('--model_file2', default='', help='use (model_file2 - model_file) as the xdirection')\n parser.add_argument('--model_file3', default='', help='use (model_file3 - model_file) as the ydirection')\n parser.add_argument('--loss_name', '-l', default='crossentropy', help='loss functions: crossentropy | mse')\n\n # direction parameters\n parser.add_argument('--dir_file', default='', help='specify the name of direction file, or the path to an eisting direction file')\n parser.add_argument('--dir_type', default='weights', help='direction type: weights | states (including BN\\'s running_mean/var)')\n parser.add_argument('--x', default='-1:1:51', help='A string with format xmin:x_max:xnum')\n parser.add_argument('--y', default=None, help='A string with format ymin:ymax:ynum')\n parser.add_argument('--z', default=None, help='A string with format zmin:zmax:znum')\n parser.add_argument('--t', default=None, help='A string with format tmin:tmax:tnum')\n parser.add_argument('--xnorm', default='', help='direction normalization: filter | layer | weight')\n parser.add_argument('--ynorm', default='', help='direction normalization: filter | layer | weight')\n parser.add_argument('--znorm', default='', help='direction normalization: filter | layer | weight')\n parser.add_argument('--tnorm', default='', help='direction normalization: filter | layer | weight')\n parser.add_argument('--xignore', default='', help='ignore bias and BN parameters: biasbn')\n parser.add_argument('--yignore', default='', help='ignore bias and BN parameters: biasbn')\n parser.add_argument('--zignore', default='', help='ignore bias 
and BN parameters: biasbn')\n parser.add_argument('--tignore', default='', help='ignore bias and BN parameters: biasbn')\n parser.add_argument('--same_dir', action='store_true', default=False, help='use the same random direction for both x-axis and y-axis')\n parser.add_argument('--idx', default=0, type=int, help='the index for the repeatness experiment')\n parser.add_argument('--surf_file', default='', help='customize the name of surface file, could be an existing file.')\n\n # plot parameters\n parser.add_argument('--proj_file', default='', help='the .h5 file contains projected optimization trajectory.')\n parser.add_argument('--loss_max', default=5, type=float, help='Maximum value to show in 1D plot')\n parser.add_argument('--vmax', default=10, type=float, help='Maximum value to map')\n parser.add_argument('--vmin', default=0.1, type=float, help='Miminum value to map')\n parser.add_argument('--vlevel', default=0.5, type=float, help='plot contours every vlevel')\n parser.add_argument('--show', action='store_true', default=False, help='show plotted figures')\n parser.add_argument('--log', action='store_true', default=False, help='use log scale for loss values')\n parser.add_argument('--plot', action='store_true', default=False, help='plot figures after computation')\n parser.add_argument('--seed', default=123, type=int, help='sets torch random seed, not numpys')\n\n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n\n #--------------------------------------------------------------------------\n # Environment setup\n #--------------------------------------------------------------------------\n if args.mpi:\n comm = mpi.setup_MPI()\n rank, nproc = comm.Get_rank(), comm.Get_size()\n else:\n comm, rank, nproc = None, 0, 1\n\n # in case of multiple GPUs per node, set the GPU to use for each rank\n if args.cuda:\n if not torch.cuda.is_available():\n raise Exception('User selected cuda option, but cuda is not available on this machine')\n gpu_count = torch.cuda.device_count()\n # torch.cuda.set_device(rank % gpu_count)\n # print('Rank %d use GPU %d of %d GPUs on %s' %\n # (rank, torch.cuda.current_device(), gpu_count, socket.gethostname()))\n\n #--------------------------------------------------------------------------\n # Check plotting resolution\n #--------------------------------------------------------------------------\n try:\n args.xmin, args.xmax, args.xnum = [float(a) for a in args.x.split(':')]\n args.ymin, args.ymax, args.ynum = (None, None, None)\n args.zmin, args.zmax, args.znum = (None, None, None)\n args.tmin, args.tmax, args.tnum = (None, None, None)\n if args.y:\n args.ymin, args.ymax, args.ynum = [float(a) for a in args.y.split(':')]\n assert args.ymin and args.ymax and args.ynum, \\\n 'You specified some arguments for the y axis, but not all'\n if args.z:\n args.zmin, args.zmax, args.znum = [float(a) for a in args.z.split(':')]\n if args.t:\n args.tmin, args.tmax, args.tnum = [float(a) for a in args.t.split(':')]\n except:\n raise Exception('Improper format for x- or y-coordinates. 
Try something like -1:1:51')\n\n #--------------------------------------------------------------------------\n # Load models and extract parameters\n #--------------------------------------------------------------------------\n net = model_loader.load(args.dataset, args.model, args.model_file)\n w = net_plotter.get_weights(net) # initial parameters\n s = copy.deepcopy(net.state_dict()) # deepcopy since state_dict are references\n if args.ngpu > 1:\n # data parallel with multiple GPUs on a single node\n net = nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n\n #--------------------------------------------------------------------------\n # Setup the direction file and the surface file\n #--------------------------------------------------------------------------\n dir_file = net_plotter.name_direction_file(args) # name the direction file\n #print(dir_file,123)\n if rank == 0:\n #print(\"LOLOL\")\n net_plotter.setup_direction(args, dir_file, net)\n\n surf_file = name_surface_file(args, dir_file)\n if rank == 0:\n setup_surface_file(args, surf_file, dir_file)\n\n #print(dir_file, surf_file)\n\n # wait until master has setup the direction file and surface file\n mpi.barrier(comm)\n\n # load directions\n #print(dir_file)\n d = net_plotter.load_directions(dir_file)\n #print(d);exit()\n # calculate the consine similarity of the two directions\n if len(d) == 2 and rank == 0:\n similarity = proj.cal_angle(proj.nplist_to_tensor(d[0]), proj.nplist_to_tensor(d[1]))\n print('cosine similarity between x-axis and y-axis: %f' % similarity)\n\n #--------------------------------------------------------------------------\n # Setup dataloader\n #--------------------------------------------------------------------------\n # download CIFAR10 if it does not exit\n if rank == 0 and args.dataset == 'cifar10':\n torchvision.datasets.CIFAR10(root=args.dataset + '/data', train=True, download=True)\n\n mpi.barrier(comm)\n\n trainloader, test_loader = dataloader.load_dataset(args.dataset, args.datapath,\n args.batch_size, args.threads, args.raw_data,\n args.data_split, args.split_idx,\n args.trainloader, args.test_loader, eval_count=args.eval_count) \n #print(\"# of train ex's:\", len(trainloader), len(test_loader))\n\n #--------------------------------------------------------------------------\n # Start the computation\n #--------------------------------------------------------------------------\n #crunch(surf_file, net, w, s, d, trainloader, 'train_loss', 'train_acc', comm, rank, args)\n crunch(surf_file, net, w, s, d, test_loader, 'test_loss', 'test_acc', comm, rank, args)\n \n #--------------------------------------------------------------------------\n # Plot figures\n #--------------------------------------------------------------------------\n if args.plot and rank == 0:\n if args.y and args.proj_file:\n plot_2D.plot_contour_trajectory(surf_file, dir_file, args.proj_file, 'train_loss', args.show)\n elif args.y:\n if args.z:\n if args.t:\n #plot_2D.plot_4d_path(surf_file, 'test_loss', args.show)\n print(\"congrats! Now you have to implement the 4d plot...\")\n else:\n plot_2D.plot_3d_scatter(surf_file, 'test_loss', args.show)\n else:\n #plot_2D.plot_2d_contour(surf_file + '_train_loss', 'train_loss', args.vmin, args.vmax, args.vlevel, args.show)\n plot_2D.plot_2d_contour(surf_file, 'test_loss', args.vmin, args.vmax, args.vlevel, args.show)\n else:\n plot_1D.plot_1d_loss_err(surf_file, args.xmin, args.xmax, args.loss_max, args.log, args.show)\n" ]
[ [ "torch.nn.MSELoss", "numpy.ones", "torch.cuda.device_count", "torch.manual_seed", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss" ] ]
WangYuxuan93/IJCAI2019-dp-sa
[ "02ca4234160a102e5481761522a149257bedcc6a" ]
[ "biaffine-parser-sa-bert/data/Dataloader.py" ]
[ "from data.Vocab import *\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\ndef read_corpus(file_path, vocab=None):\n data = []\n with open(file_path, 'r') as infile:\n for sentence in readDepTree(infile, vocab):\n data.append(sentence)\n return data\n\ndef sentences_numberize(sentences, vocab):\n for sentence in sentences:\n yield sentence2id(sentence, vocab)\n\ndef sentence2id(sentence, vocab):\n result = []\n for dep in sentence:\n wordid = vocab.word2id(dep.form)\n extwordid = vocab.extword2id(dep.form)\n tagid = vocab.tag2id(dep.tag)\n head = dep.head\n relid = vocab.rel2id(dep.rel)\n word = dep.form\n charid = dep.charid\n senid = dep.senid\n id=dep.id\n result.append([wordid, extwordid, tagid, head, relid, word, charid, id, senid])\n\n return result\n\n\n\ndef batch_slice(data, batch_size):\n batch_num = int(np.ceil(len(data) / float(batch_size)))\n for i in range(batch_num):\n cur_batch_size = batch_size if i < batch_num - 1 else len(data) - batch_size * i\n sentences = [data[i * batch_size + b] for b in range(cur_batch_size)]\n\n yield sentences\n\n\ndef data_iter(data, batch_size, shuffle=True):\n \"\"\"\n randomly permute data, then sort by source length, and partition into batches\n ensure that the length of sentences in each batch\n \"\"\"\n\n batched_data = []\n if shuffle: np.random.shuffle(data)\n batched_data.extend(list(batch_slice(data, batch_size)))\n\n if shuffle: np.random.shuffle(batched_data)\n for batch in batched_data:\n yield batch\n\n\ndef batch_data_variable(batch, vocab):\n length = len(batch[0])\n batch_size = len(batch)\n for b in range(1, batch_size):\n if len(batch[b]) > length: length = len(batch[b])\n\n words = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n extwords = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n tags = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n masks = Variable(torch.Tensor(batch_size, length).zero_(), requires_grad=False)\n positions = Variable(torch.LongTensor(batch_size, length).zero_(), requires_grad=False)\n heads = []\n rels = []\n lengths = []\n sentences = []\n elmosens=[]\n berts=[]\n\n b = 0\n for sentence in sentences_numberize(batch, vocab):\n index = 0\n sen=[]\n elmosen=[]\n length = len(sentence)\n lengths.append(length)\n elmosen.append(length)\n head = np.zeros((length), dtype=np.int32)\n rel = np.zeros((length), dtype=np.int32)\n for dep in sentence:\n words[b, index] = dep[0]\n extwords[b, index] = dep[1]\n tags[b, index] = dep[2]\n head[index] = dep[3]\n rel[index] = dep[4]\n sen.append(dep[5])\n masks[b, index] = 1\n positions[b,index] = index\n index += 1\n if dep[7] == 1:\n startcharid = dep[6]\n berts.append(dep[8])\n '''\n if startcharid == 0:\n print(\"the char id is 0:\",dep[5])\n print(\"the sen is is 0:\",dep[8])\n if startcharid == 55:\n print(\"the char id is 8\",dep[5])\n print(\"the sen is is 2:\",dep[8])\n if startcharid == 37:\n print(\"the char id is 37\",dep[5])\n print(\"the sen is is 1:\",dep[8])\n if startcharid == 83:\n print(\"the char id is 83\",dep[5])\n print(\"the sen is is 2:\",dep[8])\n '''\n \n elmosen.append(startcharid)\n \n b += 1\n heads.append(head)\n rels.append(rel)\n sentences.append(sen)\n elmosens.append(elmosen)\n \n #use_cuda=True\n #if use_cuda:\n # positions=positions.cuda()\n\n return words, extwords, tags, heads, rels, lengths, masks, positions, sentences,elmosens,berts\n\ndef batch_variable_depTree(trees, heads, rels, lengths, vocab):\n for tree, 
head, rel, length in zip(trees, heads, rels, lengths):\n sentence = []\n for idx in range(length):\n sentence.append(Dependency(idx, tree[idx].org_form, tree[idx].tag, head[idx], vocab.id2rel(rel[idx]),tree[idx].charid,tree[idx].senid))\n yield sentence\n\n\n\n" ]
[ [ "torch.Tensor", "torch.LongTensor", "numpy.zeros", "numpy.random.shuffle" ] ]
VIGNESHinZONE/dgl-lifesci
[ "9a892fd0935a7d8ab125530f54ce1e2a38b2377a" ]
[ "python/dgllife/model/pretrain/__init__.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# pylint: disable= no-member, arguments-differ, invalid-name\n#\n# Utilities for using pre-trained models.\n\nimport torch\n\nfrom dgl.data.utils import _get_dgl_url, download\n\nfrom .moleculenet import *\nfrom .generative_models import *\nfrom .property_prediction import *\nfrom .reaction import *\n\n__all__ = ['load_pretrained']\n\nurl = {**moleculenet_url, **generative_url, **property_url, **reaction_url}\n\ndef download_and_load_checkpoint(model_name, model, model_postfix,\n local_pretrained_path='pre_trained.pth', log=True):\n \"\"\"Download pretrained model checkpoint\n\n The model will be loaded to CPU.\n\n Parameters\n ----------\n model_name : str\n Name of the model\n model : nn.Module\n Instantiated model instance\n model_postfix : str\n Postfix for pretrained model checkpoint\n local_pretrained_path : str\n Local name for the downloaded model checkpoint\n log : bool\n Whether to print progress for model loading\n\n Returns\n -------\n model : nn.Module\n Pretrained model\n \"\"\"\n url_to_pretrained = _get_dgl_url(model_postfix)\n local_pretrained_path = '_'.join([model_name, local_pretrained_path])\n download(url_to_pretrained, path=local_pretrained_path, log=log)\n checkpoint = torch.load(local_pretrained_path, map_location='cpu')\n model.load_state_dict(checkpoint['model_state_dict'])\n\n if log:\n print('Pretrained model loaded')\n\n return model\n\n# pylint: disable=I1101\ndef load_pretrained(model_name, log=True):\n \"\"\"Load a pretrained model\n\n Parameters\n ----------\n model_name : str\n Currently supported options include\n\n * ``'GCN_Tox21'``: A GCN-based model for molecular property prediction on Tox21\n * ``'GAT_Tox21'``: A GAT-based model for molecular property prediction on Tox21\n * ``'Weave_Tox21'``: A Weave model for molecular property prediction on Tox21\n * ``'AttentiveFP_Aromaticity'``: An AttentiveFP model for predicting number of\n aromatic atoms on a subset of Pubmed\n * ``'DGMG_ChEMBL_canonical'``: A DGMG model trained on ChEMBL with a canonical\n atom order\n * ``'DGMG_ChEMBL_random'``: A DGMG model trained on ChEMBL for molecule generation\n with a random atom order\n * ``'DGMG_ZINC_canonical'``: A DGMG model trained on ZINC for molecule generation\n with a canonical atom order\n * ``'DGMG_ZINC_random'``: A DGMG model pre-trained on ZINC for molecule generation\n with a random atom order\n * ``'JTNN_ZINC'``: A JTNN model pre-trained on ZINC for molecule generation\n * ``'wln_center_uspto'``: A WLN model pre-trained on USPTO for reaction prediction\n * ``'wln_rank_uspto'``: A WLN model pre-trained on USPTO for candidate product ranking\n * ``'gin_supervised_contextpred'``: A GIN model pre-trained with supervised learning\n and context prediction\n * ``'gin_supervised_infomax'``: A GIN model pre-trained with supervised learning\n and deep graph infomax\n * ``'gin_supervised_edgepred'``: A GIN model pre-trained with supervised learning\n and edge prediction\n * ``'gin_supervised_masking'``: A GIN model pre-trained with supervised learning\n and attribute masking\n * ``'GCN_canonical_BACE'``: A GCN model trained on BACE with canonical\n featurization for atoms\n * ``'GCN_attentivefp_BACE'``: A GCN model trained on BACE with attentivefp\n featurization for atoms\n * ``'GAT_canonical_BACE'``: A GAT model trained on BACE with canonical\n featurization for atoms\n * ``'GAT_attentivefp_BACE'``: A GAT model trained 
on BACE with attentivefp\n featurization for atoms\n * ``'Weave_canonical_BACE'``: A Weave model trained on BACE with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_BACE'``: A Weave model trained on BACE with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_BACE'``: An MPNN model trained on BACE with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_BACE'``: An MPNN model trained on BACE with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_BACE'``: An AttentiveFP model trained on BACE with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_BACE'``: An AttentiveFP model trained on BACE with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_BACE'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on BACE\n * ``'gin_supervised_infomax_BACE'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on BACE\n * ``'gin_supervised_edgepred_BACE'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on BACE\n * ``'gin_supervised_masking_BACE'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on BACE\n * ``'NF_canonical_BACE'``: An NF model trained on BACE with canonical\n featurization for atoms\n * ``'GCN_canonical_BBBP'``: A GCN model trained on BBBP with canonical\n featurization for atoms\n * ``'GCN_attentivefp_BBBP'``: A GCN model trained on BBBP with attentivefp\n featurization for atoms\n * ``'GAT_canonical_BBBP'``: A GAT model trained on BBBP with canonical\n featurization for atoms\n * ``'GAT_attentivefp_BBBP'``: A GAT model trained on BBBP with attentivefp\n featurization for atoms\n * ``'Weave_canonical_BBBP'``: A Weave model trained on BBBP with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_BBBP'``: A Weave model trained on BBBP with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_BBBP'``: An MPNN model trained on BBBP with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_BBBP'``: An MPNN model trained on BBBP with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_BBBP'``: An AttentiveFP model trained on BBBP with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_BBBP'``: An AttentiveFP model trained on BBBP with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_BBBP'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on BBBP\n * ``'gin_supervised_infomax_BBBP'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on BBBP\n * ``'gin_supervised_edgepred_BBBP'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on BBBP\n * ``'gin_supervised_masking_BBBP'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on BBBP\n * ``'NF_canonical_BBBP'``: An NF model pre-trained on BBBP with canonical\n featurization for atoms\n * ``'GCN_canonical_ClinTox'``: A GCN model trained on ClinTox with canonical\n featurization for atoms\n * ``'GCN_attentivefp_ClinTox'``: A GCN model trained on ClinTox with attentivefp\n featurization for atoms\n * ``'GAT_canonical_ClinTox'``: A GAT model trained on ClinTox with canonical\n featurization for atoms\n * ``'GAT_attentivefp_ClinTox'``: A GAT model trained on ClinTox with attentivefp\n 
featurization for atoms\n * ``'Weave_canonical_ClinTox'``: A Weave model trained on ClinTox with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_ClinTox'``: A Weave model trained on ClinTox with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_ClinTox'``: An MPNN model trained on ClinTox with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_ClinTox'``: An MPNN model trained on ClinTox with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_ClinTox'``: An AttentiveFP model trained on ClinTox with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_BACE'``: An AttentiveFP model trained on ClinTox with\n attentivefp featurization for atoms and bonds\n * ``'GCN_canonical_ESOL'``: A GCN model trained on ESOL with canonical\n featurization for atoms\n * ``'GCN_attentivefp_ESOL'``: A GCN model trained on ESOL with attentivefp\n featurization for atoms\n * ``'GAT_canonical_ESOL'``: A GAT model trained on ESOL with canonical\n featurization for atoms\n * ``'GAT_attentivefp_ESOL'``: A GAT model trained on ESOL with attentivefp\n featurization for atoms\n * ``'Weave_canonical_ESOL'``: A Weave model trained on ESOL with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_ESOL'``: A Weave model trained on ESOL with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_ESOL'``: An MPNN model trained on ESOL with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_ESOL'``: An MPNN model trained on ESOL with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_ESOL'``: An AttentiveFP model trained on ESOL with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_ESOL'``: An AttentiveFP model trained on ESOL with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_ESOL'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on ESOL\n * ``'gin_supervised_infomax_ESOL'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on ESOL\n * ``'gin_supervised_edgepred_ESOL'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on ESOL\n * ``'gin_supervised_masking_ESOL'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on ESOL\n * ``'GCN_canonical_FreeSolv'``: A GCN model trained on FreeSolv with canonical\n featurization for atoms\n * ``'GCN_attentivefp_FreeSolv'``: A GCN model trained on FreeSolv with attentivefp\n featurization for atoms\n * ``'GAT_canonical_FreeSolv'``: A GAT model trained on FreeSolv with canonical\n featurization for atoms\n * ``'GAT_attentivefp_FreeSolv'``: A GAT model trained on FreeSolv with attentivefp\n featurization for atoms\n * ``'Weave_canonical_FreeSolv'``: A Weave model trained on FreeSolv with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_FreeSolv'``: A Weave model trained on FreeSolv with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_FreeSolv'``: An MPNN model trained on FreeSolv with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_FreeSolv'``: An MPNN model trained on FreeSolv with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_FreeSolv'``: An AttentiveFP model trained on FreeSolv with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_FreeSolv'``: An AttentiveFP model trained on 
FreeSolv with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_FreeSolv'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on FreeSolv\n * ``'gin_supervised_infomax_FreeSolv'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on FreeSolv\n * ``'gin_supervised_edgepred_FreeSolv'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on FreeSolv\n * ``'gin_supervised_masking_FreeSolv'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on FreeSolv\n * ``'GCN_canonical_HIV'``: A GCN model trained on HIV with canonical\n featurization for atoms\n * ``'GCN_attentivefp_HIV'``: A GCN model trained on HIV with attentivefp\n featurization for atoms\n * ``'GAT_canonical_HIV'``: A GAT model trained on BACE with canonical\n featurization for atoms\n * ``'GAT_attentivefp_HIV'``: A GAT model trained on BACE with attentivefp\n featurization for atoms\n * ``'Weave_canonical_HIV'``: A Weave model trained on HIV with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_HIV'``: A Weave model trained on HIV with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_HIV'``: An MPNN model trained on HIV with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_HIV'``: An MPNN model trained on HIV with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_HIV'``: An AttentiveFP model trained on HIV with canonical\n featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_HIV'``: An AttentiveFP model trained on HIV with attentivefp\n featurization for atoms and bonds\n * ``'gin_supervised_contextpred_HIV'``: A GIN model pre-trained with supervised learning\n and context prediction, and fine-tuned on HIV\n * ``'gin_supervised_infomax_HIV'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on HIV\n * ``'gin_supervised_edgepred_HIV'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on HIV\n * ``'gin_supervised_masking_HIV'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on HIV\n * ``'NF_canonical_HIV'``: An NF model trained on HIV with canonical\n featurization for atoms\n * ``'GCN_canonical_Lipophilicity'``: A GCN model trained on Lipophilicity with canonical\n featurization for atoms\n * ``'GCN_attentivefp_Lipophilicity'``: A GCN model trained on Lipophilicity with\n attentivefp featurization for atoms\n * ``'GAT_canonical_Lipophilicity'``: A GAT model trained on Lipophilicity with canonical\n featurization for atoms\n * ``'GAT_attentivefp_Lipophilicity'``: A GAT model trained on Lipophilicity with\n attentivefp featurization for atoms\n * ``'Weave_canonical_Lipophilicity'``: A Weave model trained on Lipophilicity with\n canonical featurization for atoms and bonds\n * ``'Weave_attentivefp_Lipophilicity'``: A Weave model trained on Lipophilicity with\n attentivefp featurization for atoms and bonds\n * ``'MPNN_canonical_Lipophilicity'``: An MPNN model trained on Lipophilicity with\n canonical featurization for atoms and bonds\n * ``'MPNN_attentivefp_Lipophilicity'``: An MPNN model trained on Lipophilicity with\n attentivefp featurization for atoms and bonds\n * ``'AttentiveFP_canonical_Lipophilicity'``: An AttentiveFP model trained on\n Lipophilicity with canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_Lipophilicity'``: An AttentiveFP 
model trained on\n Lipophilicity with attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_Lipophilicity'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on Lipophilicity\n * ``'gin_supervised_infomax_Lipophilicity'``: A GIN model pre-trained with supervised\n learning and infomax, and fine-tuned on Lipophilicity\n * ``'gin_supervised_edgepred_Lipophilicity'``: A GIN model pre-trained with supervised\n learning and edge prediction, and fine-tuned on Lipophilicity\n * ``'gin_supervised_masking_Lipophilicity'``: A GIN model pre-trained with supervised\n learning and masking, and fine-tuned on Lipophilicity\n * ``'GCN_canonical_MUV'``: A GCN model trained on MUV with canonical\n featurization for atoms\n * ``'GCN_attentivefp_MUV'``: A GCN model trained on MUV with attentivefp\n featurization for atoms\n * ``'GAT_canonical_MUV'``: A GAT model trained on MUV with canonical\n featurization for atoms\n * ``'GAT_attentivefp_MUV'``: A GAT model trained on MUV with attentivefp\n featurization for atoms\n * ``'Weave_canonical_MUV'``: A Weave model trained on MUV with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_MUV'``: A Weave model trained on MUV with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_MUV'``: An MPNN model trained on MUV with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_MUV'``: An MPNN model trained on MUV with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_MUV'``: An AttentiveFP model trained on MUV with canonical\n featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_MUV'``: An AttentiveFP model trained on MUV with attentivefp\n featurization for atoms and bonds\n * ``'gin_supervised_contextpred_MUV'``: A GIN model pre-trained with supervised learning\n and context prediction, and fine-tuned on MUV\n * ``'gin_supervised_infomax_MUV'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on MUV\n * ``'gin_supervised_edgepred_MUV'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on MUV\n * ``'gin_supervised_masking_MUV'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on MUV\n * ``'GCN_canonical_PCBA'``: A GCN model trained on PCBA with canonical\n featurization for atoms\n * ``'GCN_attentivefp_PCBA'``: A GCN model trained on PCBA with attentivefp\n featurization for atoms\n * ``'GAT_canonical_PCBA'``: A GAT model trained on PCBA with canonical\n featurization for atoms\n * ``'GAT_attentivefp_PCBA'``: A GAT model trained on PCBA with attentivefp\n featurization for atoms\n * ``'Weave_canonical_PCBA'``: A Weave model trained on PCBA with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_PCBA'``: A Weave model trained on PCBA with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_PCBA'``: An MPNN model trained on PCBA with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_PCBA'``: An MPNN model trained on PCBA with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_PCBA'``: An AttentiveFP model trained on PCBA with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_PCBA'``: An AttentiveFP model trained on PCBA with\n attentivefp featurization for atoms and bonds\n * ``'GCN_canonical_SIDER'``: A GCN model trained on SIDER with canonical\n featurization for atoms\n * ``'GCN_attentivefp_SIDER'``: 
A GCN model trained on SIDER with attentivefp\n featurization for atoms\n * ``'GAT_canonical_SIDER'``: A GAT model trained on SIDER with canonical\n featurization for atoms\n * ``'GAT_attentivefp_SIDER'``: A GAT model trained on SIDER with attentivefp\n featurization for atoms\n * ``'Weave_canonical_SIDER'``: A Weave model trained on SIDER with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_SIDER'``: A Weave model trained on SIDER with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_SIDER'``: An MPNN model trained on SIDER with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_SIDER'``: An MPNN model trained on SIDER with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_SIDER'``: An AttentiveFP model trained on SIDER with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_SIDER'``: An AttentiveFP model trained on SIDER with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_SIDER'``: A GIN model pre-trained with supervised learning\n and context prediction, and fine-tuned on SIDER\n * ``'gin_supervised_infomax_SIDER'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on SIDER\n * ``'gin_supervised_edgepred_SIDER'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on SIDER\n * ``'gin_supervised_masking_SIDER'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on SIDER\n * ``'NF_canonical_SIDER'``: An NF model trained on SIDER with canonical\n featurization for atoms\n * ``'GCN_canonical_Tox21'``: A GCN model trained on Tox21 with canonical\n featurization for atoms\n * ``'GCN_attentivefp_Tox21'``: A GCN model trained on Tox21 with attentivefp\n featurization for atoms\n * ``'GAT_canonical_Tox21'``: A GAT model trained on Tox21 with canonical\n featurization for atoms\n * ``'GAT_attentivefp_Tox21'``: A GAT model trained on Tox21 with attentivefp\n featurization for atoms\n * ``'Weave_canonical_Tox21'``: A Weave model trained on Tox21 with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_Tox21'``: A Weave model trained on Tox21 with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_Tox21'``: An MPNN model trained on Tox21 with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_Tox21'``: An MPNN model trained on Tox21 with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_Tox21'``: An AttentiveFP model trained on Tox21 with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_Tox21'``: An AttentiveFP model trained on Tox21 with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_Tox21'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on Tox21\n * ``'gin_supervised_infomax_Tox21'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on Tox21\n * ``'gin_supervised_edgepred_Tox21'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on Tox21\n * ``'gin_supervised_masking_Tox21'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on Tox21\n * ``'NF_canonical_Tox21'``: An NF model trained on Tox21 with canonical\n featurization for atoms\n * ``'GCN_canonical_ToxCast'``: A GCN model trained on ToxCast with canonical\n featurization for atoms\n * ``'GCN_attentivefp_ToxCast'``: 
A GCN model trained on ToxCast with attentivefp\n featurization for atoms\n * ``'GAT_canonical_ToxCast'``: A GAT model trained on ToxCast with canonical\n featurization for atoms\n * ``'GAT_attentivefp_ToxCast'``: A GAT model trained on ToxCast with attentivefp\n featurization for atoms\n * ``'Weave_canonical_ToxCast'``: A Weave model trained on ToxCast with canonical\n featurization for atoms and bonds\n * ``'Weave_attentivefp_ToxCast'``: A Weave model trained on ToxCast with attentivefp\n featurization for atoms and bonds\n * ``'MPNN_canonical_ToxCast'``: An MPNN model trained on ToxCast with canonical\n featurization for atoms and bonds\n * ``'MPNN_attentivefp_ToxCast'``: An MPNN model trained on ToxCast with attentivefp\n featurization for atoms and bonds\n * ``'AttentiveFP_canonical_ToxCast'``: An AttentiveFP model trained on ToxCast with\n canonical featurization for atoms and bonds\n * ``'AttentiveFP_attentivefp_ToxCast'``: An AttentiveFP model trained on ToxCast with\n attentivefp featurization for atoms and bonds\n * ``'gin_supervised_contextpred_ToxCast'``: A GIN model pre-trained with supervised\n learning and context prediction, and fine-tuned on ToxCast\n * ``'gin_supervised_infomax_ToxCast'``: A GIN model pre-trained with supervised learning\n and infomax, and fine-tuned on ToxCast\n * ``'gin_supervised_edgepred_ToxCast'``: A GIN model pre-trained with supervised learning\n and edge prediction, and fine-tuned on ToxCast\n * ``'gin_supervised_masking_ToxCast'``: A GIN model pre-trained with supervised learning\n and masking, and fine-tuned on ToxCast\n * ``'NF_canonical_ToxCast'``: An NF model trained on ToxCast with canonical\n featurization for atoms and bonds\n\n log : bool\n Whether to print progress for model loading\n\n Returns\n -------\n model\n \"\"\"\n if model_name not in url:\n raise RuntimeError(\"Cannot find a pretrained model with name {}\".format(model_name))\n\n for func in [create_moleculenet_model, create_generative_model,\n create_property_model, create_reaction_model]:\n model = func(model_name)\n if model is not None:\n break\n\n return download_and_load_checkpoint(model_name, model, url[model_name], log=log)\n" ]
[ [ "torch.load" ] ]
siddheshshaji/FLAML
[ "ffee24e8afd9009ccb5d269e72f5d50c894da531" ]
[ "test/reg.py" ]
[ "from flaml import AutoML\nfrom sklearn.datasets import fetch_california_housing\n\n# Initialize an AutoML instance\nautoml = AutoML()\n# Specify automl goal and constraint\nautoml_settings = {\n \"time_budget\": 1, # in seconds\n \"metric\": \"r2\",\n \"task\": \"regression\",\n \"log_file_name\": \"test/california.log\",\n}\nX_train, y_train = fetch_california_housing(return_X_y=True)\n# Train with labeled input data\nautoml.fit(X_train=X_train, y_train=y_train, **automl_settings)\nprint(automl.model)\nprint(automl.model.estimator)\n\nprint(automl.best_estimator)\nprint(automl.best_config)\nprint(automl.best_config_per_estimator)\n\nprint(automl.best_config_train_time)\nprint(automl.best_iteration)\nprint(automl.best_loss)\nprint(automl.time_to_find_best_model)\nprint(automl.config_history)\n" ]
[ [ "sklearn.datasets.fetch_california_housing" ] ]
tanishqjha2298/Toxic-message-filtering-app
[ "bc182b5e2503d5b332e8928aa0e42cc9b58dae2d" ]
[ "flask_api_output.py" ]
[ "# Load libraries\nimport flask\nimport pandas as pd\nimport tensorflow as tf\nimport keras\nfrom keras.models import load_model\n\n# instantiate flask \napp = flask.Flask(__name__)\n\n# load the model, and pass in the custom metric function\nglobal graph\ngraph = tf.get_default_graph()\nmodel = load_model('Model_final.h5')\n\[email protected]('/apitest/<arg>')\ndef apitest(arg):\n return 'API working'+arg\n\napp.run(host='0.0.0.0', debug=False, port=5005)\n" ]
[ [ "tensorflow.get_default_graph" ] ]
mlopezarango/Python
[ "2d3d660155241113b23e4ed810e05479b2fc4bba" ]
[ "machine_learning/random_forest_regressor.py" ]
[ "# Random Forest Regressor Example\n\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\n\n\ndef main():\n\n \"\"\"\n Random Forest Regressor Example using sklearn function.\n Boston house price dataset is used to demonstrate the algorithm.\n \"\"\"\n\n # Load Boston house price dataset\n boston = load_boston()\n print(boston.keys())\n\n # Split dataset into train and test data\n X = boston[\"data\"] # features\n Y = boston[\"target\"]\n x_train, x_test, y_train, y_test = train_test_split(\n X, Y, test_size=0.3, random_state=1\n )\n\n # Random Forest Regressor\n rand_for = RandomForestRegressor(random_state=42, n_estimators=300)\n rand_for.fit(x_train, y_train)\n\n # Predict target for test data\n predictions = rand_for.predict(x_test)\n predictions = predictions.reshape(len(predictions), 1)\n\n # Error printing\n print(f\"Mean Absolute Error:\\t {mean_absolute_error(y_test, predictions)}\")\n print(f\"Mean Square Error :\\t {mean_squared_error(y_test, predictions)}\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "sklearn.metrics.mean_squared_error", "sklearn.metrics.mean_absolute_error", "sklearn.ensemble.RandomForestRegressor", "sklearn.model_selection.train_test_split", "sklearn.datasets.load_boston" ] ]
mengzaiqiao/TVBR
[ "cdac86a753c41f8f3c55a025be8d88dd305325f5" ]
[ "beta_rec/models/ngcf.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.sparse as sparse\n\nfrom beta_rec.models.torch_engine import ModelEngine\n\n\nclass NGCF(torch.nn.Module):\n \"\"\"Model initialisation, embedding generation and prediction of NGCF.\"\"\"\n\n def __init__(self, config, norm_adj):\n \"\"\"Initialize NGCF Class.\"\"\"\n super(NGCF, self).__init__()\n self.config = config\n self.n_users = config[\"n_users\"]\n self.n_items = config[\"n_items\"]\n self.emb_dim = config[\"emb_dim\"]\n self.layer_size = config[\"layer_size\"]\n self.norm_adj = norm_adj\n self.n_layers = len(self.layer_size)\n self.dropout = nn.ModuleList()\n self.GC_weights = nn.ModuleList()\n self.Bi_weights = nn.ModuleList()\n self.dropout_list = list(config[\"mess_dropout\"])\n self.layer_size = [self.emb_dim] + self.layer_size\n # Create GNN layers\n\n for i in range(self.n_layers):\n self.GC_weights.append(\n nn.Linear(self.layer_size[i], self.layer_size[i + 1])\n )\n self.Bi_weights.append(\n nn.Linear(self.layer_size[i], self.layer_size[i + 1])\n )\n self.dropout.append(nn.Dropout(self.dropout_list[i]))\n\n self.user_embedding = nn.Embedding(self.n_users, self.emb_dim)\n self.item_embedding = nn.Embedding(self.n_items, self.emb_dim)\n self.init_emb()\n\n def init_emb(self):\n \"\"\"Initialize users and itmes' embeddings.\"\"\"\n # Initialize users and items' embeddings\n nn.init.xavier_uniform_(self.user_embedding.weight)\n nn.init.xavier_uniform_(self.item_embedding.weight)\n\n def forward(self, norm_adj):\n \"\"\"Perform GNN function on users and item embeddings.\n\n Args:\n norm_adj (torch sparse tensor): the norm adjacent matrix of the user-item interaction matrix.\n Returns:\n u_g_embeddings (tensor): processed user embeddings.\n i_g_embeddings (tensor): processed item embeddings.\n \"\"\"\n ego_embeddings = torch.cat(\n (self.user_embedding.weight, self.item_embedding.weight), dim=0\n )\n all_embeddings = [ego_embeddings]\n\n norm_adj = norm_adj.to(self.device)\n for i in range(self.n_layers):\n side_embeddings = sparse.mm(norm_adj, ego_embeddings)\n sum_embeddings = F.leaky_relu(self.GC_weights[i](side_embeddings))\n bi_embeddings = torch.mul(ego_embeddings, side_embeddings)\n bi_embeddings = F.leaky_relu(self.Bi_weights[i](bi_embeddings))\n ego_embeddings = sum_embeddings + bi_embeddings\n ego_embeddings = self.dropout[i](ego_embeddings)\n\n norm_embeddings = F.normalize(ego_embeddings, p=2, dim=1)\n all_embeddings += [norm_embeddings]\n\n all_embeddings = torch.cat(all_embeddings, dim=1)\n u_g_embeddings, i_g_embeddings = torch.split(\n all_embeddings, [self.n_users, self.n_items], dim=0\n )\n\n return u_g_embeddings, i_g_embeddings\n\n def predict(self, users, items):\n \"\"\"Predict result with the model.\n\n Args:\n users (int, or list of int): user id.\n items (int, or list of int): item id.\n Return:\n scores (int): dot product.\n \"\"\"\n users_t = torch.tensor(users, dtype=torch.int64, device=self.device)\n items_t = torch.tensor(items, dtype=torch.int64, device=self.device)\n\n with torch.no_grad():\n ua_embeddings, ia_embeddings = self.forward(self.norm_adj)\n u_g_embeddings = ua_embeddings[users_t]\n i_g_embeddings = ia_embeddings[items_t]\n scores = torch.mul(u_g_embeddings, i_g_embeddings).sum(dim=1)\n return scores\n\n\nclass NGCFEngine(ModelEngine):\n \"\"\"NGCFEngine Class.\"\"\"\n\n # A class includes train an epoch and train a batch of NGCF\n\n def __init__(self, config):\n \"\"\"Initialize NGCFEngine Class.\"\"\"\n self.config = config\n self.regs = 
config[\"model\"][\"regs\"] # reg is the regularisation\n self.decay = self.regs[0]\n self.batch_size = config[\"model\"][\"batch_size\"]\n self.norm_adj = config[\"model\"][\"norm_adj\"]\n self.model = NGCF(config[\"model\"], self.norm_adj)\n super(NGCFEngine, self).__init__(config)\n self.model.to(self.device)\n\n def train_single_batch(self, batch_data):\n \"\"\"Train the model in a single batch.\n\n Args:\n batch_data (list): batch users, positive items and negative items.\n Return:\n loss (float): batch loss.\n \"\"\"\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.optimizer.zero_grad()\n norm_adj = self.norm_adj\n ua_embeddings, ia_embeddings = self.model.forward(norm_adj)\n\n batch_users, pos_items, neg_items = batch_data\n\n u_g_embeddings = ua_embeddings[batch_users]\n pos_i_g_embeddings = ia_embeddings[pos_items]\n neg_i_g_embeddings = ia_embeddings[neg_items]\n\n batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(\n u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings\n )\n\n batch_loss = batch_mf_loss + batch_emb_loss + batch_reg_loss\n\n batch_loss.backward()\n self.optimizer.step()\n loss = batch_loss.item()\n return loss, batch_reg_loss\n\n def train_an_epoch(self, train_loader, epoch_id):\n \"\"\"Train the model in one epoch.\n\n Args:\n epoch_id (int): the number of epoch.\n train_loader (function): user, pos_items and neg_items generator.\n \"\"\"\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0.0\n regularizer = 0.0\n for batch_data in train_loader:\n loss, reg = self.train_single_batch(batch_data)\n total_loss += loss\n regularizer += reg\n print(f\"[Training Epoch {epoch_id}], Loss {loss}, Regularizer {regularizer}\")\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)\n self.writer.add_scalar(\"model/regularizer\", regularizer, epoch_id)\n\n def bpr_loss(self, users, pos_items, neg_items):\n \"\"\"Bayesian Personalised Ranking (BPR) pairwise loss function.\n\n Note that the sizes of pos_scores and neg_scores should be equal.\n\n Args:\n pos_scores (tensor): Tensor containing predictions for known positive items.\n neg_scores (tensor): Tensor containing predictions for sampled negative items.\n\n Returns:\n loss.\n \"\"\"\n # Calculate BPR loss\n pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n\n regularizer = (\n 1.0 / 2 * (users ** 2).sum()\n + 1.0 / 2 * (pos_items ** 2).sum()\n + 1.0 / 2 * (neg_items ** 2).sum()\n )\n regularizer = regularizer / self.batch_size\n\n maxi = F.logsigmoid(pos_scores - neg_scores)\n mf_loss = -torch.mean(maxi)\n\n emb_loss = self.decay * regularizer\n reg_loss = 0.0\n return mf_loss, emb_loss, reg_loss\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.normalize", "torch.cat", "torch.mul", "torch.nn.Dropout", "torch.nn.ModuleList", "torch.split", "torch.nn.functional.logsigmoid", "torch.nn.init.xavier_uniform_", "torch.no_grad", "torch.tensor", "torch.mean", "torch.nn.Embedding", "torch.sparse.mm" ] ]
Tim232/Python-Things
[ "05f0f373a4cf298e70d9668c88a6e3a9d1cd8146", "05f0f373a4cf298e70d9668c88a6e3a9d1cd8146", "05f0f373a4cf298e70d9668c88a6e3a9d1cd8146", "05f0f373a4cf298e70d9668c88a6e3a9d1cd8146" ]
[ "Lectures/DeepLearningClass/chapter5/train_neuralnet_mnist_3_layer_momentum.py", "Books/DeepLearningfromScratch/P01_HelloPython/numpy_pyplot.py", "Books/LearningTensorFlow/Chapter8_Queue_Thread_DataLoading/subchapter_02_tfrecords_read_write.py", "Books/DeepLearningfromScratch/P07_CNN/p01_convolutional_pooling_layer.py" ]
[ "# epoch - 0 , train_acc - 0.0754 , test_acc - 0.0728\n# epoch - 1 , train_acc - 0.86505 , test_acc - 0.865\n# epoch - 2 , train_acc - 0.9139 , test_acc - 0.9139\n# epoch - 3 , train_acc - 0.938466666667 , test_acc - 0.9385\n# epoch - 4 , train_acc - 0.95845 , test_acc - 0.9538\n# epoch - 5 , train_acc - 0.967166666667 , test_acc - 0.9631\n# epoch - 6 , train_acc - 0.971666666667 , test_acc - 0.9654\n# epoch - 7 , train_acc - 0.97515 , test_acc - 0.9669\n# epoch - 8 , train_acc - 0.978633333333 , test_acc - 0.9683\n# epoch - 9 , train_acc - 0.982266666667 , test_acc - 0.9711\n# epoch - 10 , train_acc - 0.984766666667 , test_acc - 0.9729\n# epoch - 11 , train_acc - 0.985766666667 , test_acc - 0.9733\n# epoch - 12 , train_acc - 0.986483333333 , test_acc - 0.9726\n# epoch - 13 , train_acc - 0.989583333333 , test_acc - 0.9761\n# epoch - 14 , train_acc - 0.991133333333 , test_acc - 0.9736\n# epoch - 15 , train_acc - 0.990016666667 , test_acc - 0.9744\n# epoch - 16 , train_acc - 0.993816666667 , test_acc - 0.9761\nimport sys, os\n\nsys.path.append(os.pardir)\n\nimport numpy as np\nfrom DeepLearningClass.dataset.mnist import load_mnist\nfrom DeepLearningClass.chapter5.two_layer_net_3_layer import TwoLayerNet\nfrom DeepLearningClass.common.optimizer import Momentum\n\n# 데이터 읽기\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)\n\nnetwork = TwoLayerNet(input_size=784, hidden_size1=200, hidden_size2=200, output_size=10)\n\niters_num = 10000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\n\ntrain_loss_list = []\ntrain_acc_list = []\ntest_acc_list = []\n\niter_per_epoch = max(train_size / batch_size, 1)\n\nmomentum = Momentum()\n\nfor i in range(iters_num):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n\n # 기울기 계산\n # grad = network.numerical_gradient(x_batch, t_batch) # 수치 미분 방식\n grad = network.gradient(x_batch, t_batch) # 오차역전파법 방식(훨씬 빠르다)\n\n # 갱신\n momentum.update(network.params, grad)\n\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n\n if i % iter_per_epoch == 0:\n train_acc = network.accuracy(x_train, t_train)\n test_acc = network.accuracy(x_test, t_test)\n train_acc_list.append(train_acc)\n test_acc_list.append(test_acc)\n print('epoch -', int(i / iter_per_epoch), ', train_acc -', train_acc, ', test_acc -', test_acc)", "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\n\n# 데이터 준비\nx = np.arange(0, 6, 0.1) # 0에서 6까지 0.1 간격으로 생성\ny1 = np.sin(x)\ny2 = np.cos(x)\n\n# 그래프 그리기\nplt.plot(x, y1, label='sin')\nplt.plot(x, y2, linestyle='--', label='cos') # cos 함수는 점선으로 그리기\nplt.xlabel('x') # x축 이름\nplt.ylabel('y') # y축 이름\nplt.title('sin & cos') # 제목\nplt.legend()\n\n# 이미지 표시\nimg = imread('background.jpg')\nplt.imshow(img)\nplt.show()", "import os\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.datasets import mnist\nimport numpy as np\n\nsave_dir = './Mnist_data'\n\n# save_dir 에 데이터 내려받기\ndata_sets = mnist.read_data_sets(save_dir,\n dtype=tf.uint8,\n reshape=False,\n validation_size=1000)\n\ndata_splits = ['train', 'test', 'validation']\n\n#todo mnist dataset -> tfrecord 변환\nfor d in range(len(data_splits)):\n print('saving:' + data_splits[d])\n data_set = data_sets[d]\n print('data_set.images shape:', data_set.images.shape, ', data_set.labels shape:', data_set.labels.shape)\n\n filename = os.path.join(save_dir, 'tfrecord', data_splits[d] + '.tfrecords')\n writer = 
tf.python_io.TFRecordWriter(filename)\n\n for index in range(data_set.images.shape[0]):\n image = data_set.images[index].tostring()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[1]])),\n 'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[2]])),\n 'depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[data_set.images.shape[3]])),\n 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(data_set.labels[index])])),\n 'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image]))\n }))\n\n writer.write(example.SerializeToString())\n\n writer.close()\n\n#todo tfrecord data read\nfilename = os.path.join(save_dir, 'tfrecord', 'train.tfrecords')\nrecord_iterator = tf.python_io.tf_record_iterator(filename)\nserialized_img_example = next(record_iterator)\n\nexample = tf.train.Example()\nexample.ParseFromString(serialized_img_example)\nimage = example.features.feature['image_raw'].bytes_list.value\nlabel = example.features.feature['label'].int64_list.value[0]\nwidth = example.features.feature['width'].int64_list.value[0]\nheight = example.features.feature['height'].int64_list.value[0]\n\nimg_flat = np.fromstring(image[0], dtype=np.uint8)\nimg_reshaped = img_flat.reshape((height, width, -1))\n\nprint(img_reshaped)", "import numpy as np\n\nx = np.random.rand(10, 1, 28, 28) # 높이 28, 너비 28, 채널 1, 데이터 10\nprint(x.shape)\nprint(x[0].shape) # 첫 번째 데이터\nprint(x[1].shape) # 두 번째 데이터\nprint(x[0, 0].shape) # 첫 번째 데이터의 첫 채널의 공간 데이터\n\nimport sys, os\nsys.path.append(os.pardir)\nfrom DeepLearningfromScratch.common.util import im2col\n\nx1 = np.random.rand(1, 3, 7, 7) # (데이터 수, 채널 수, 높이, 너비)\ncol1 = im2col(x1, 5, 5, stride=1, pad=0)\nprint(col1.shape)\n\nx2 = np.random.rand(10, 3, 7, 7)\ncol2 = im2col(x2, 5, 5, stride=1, pad=0)\nprint(col2.shape)\n\nclass Convolution:\n '''합성곱 계층\n ▣ __init__() method\n parameters\n ----------\n W : 필터\n b : 편향\n stride : 스트라이드\n pad : 패딩\n\n ▣ forward() method\n parameters\n ----------\n x : 입력 값\n '''\n def __init__(self, W, b, stride=1, pad=0):\n self.W = W\n self.b = b\n self.stride = stride\n self.pad = pad\n\n def forward(self, x):\n FN, C, FH, FW = self.W.shape\n N, C, H, W = x.shape\n out_h = int(1 + (H + 2*self.pad - FH) / self.stride)\n out_w = int(1 + (W + 2*self.pad - FW) / self.stride)\n\n col = im2col(x, FH, FW, self.stride, self.pad) # 입력 값 변경\n col_W = self.W.reshape(FN, -1).T # 필터 변경\n out = np.dot(col, col_W) + self.b # 입력 값과 필터간의 내적 수행\n\n out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)\n\n return out\n\nclass Pooling:\n def __init__(self, pool_h, pool_w, stride=1, pad=0):\n self.pool_h = pool_h\n self.pool_w = pool_w\n self.stride = stride\n self.pad = pad\n\n def forward(self, x):\n N, C, H, W = x.shape\n out_h = int(1 + (H - self.pool_h) / self.stride)\n out_w = int(1 + (H - self.pool_w) / self.stride)\n\n # 전개 (1)\n col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)\n col = col.reshape(-1, self.pool_w*self.pool_h)\n\n # 최대값 (2)\n out = np.max(col, axis=1)\n\n # 성형 (3)\n out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)\n\n return out" ]
[ [ "numpy.random.choice" ], [ "numpy.sin", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.image.imread", "numpy.arange", "numpy.cos", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ], [ "tensorflow.train.BytesList", "tensorflow.python_io.tf_record_iterator", "tensorflow.train.Int64List", "tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets", "tensorflow.python_io.TFRecordWriter", "tensorflow.train.Example", "numpy.fromstring" ], [ "numpy.max", "numpy.dot", "numpy.random.rand" ] ]
astrojhgu/ares
[ "42008c8e4bf79f0b000cc833e02a86510bce7611", "42008c8e4bf79f0b000cc833e02a86510bce7611", "42008c8e4bf79f0b000cc833e02a86510bce7611" ]
[ "ares/static/Grid.py", "ares/static/VolumeGlobal.py", "ares/simulations/MetaGalacticBackground.py" ]
[ "\"\"\"\n\nGrid.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Thu Sep 20 14:18:27 2012\n\nDescription: \n\n\"\"\"\n\nimport copy, types\nimport numpy as np\nfrom ..util.Stats import rebin\nfrom collections import Iterable\nfrom ..physics.Hydrogen import Hydrogen\nfrom ..physics.Cosmology import Cosmology\nfrom ..util.ParameterFile import ParameterFile\nfrom ..physics.CrossSections import PhotoIonizationCrossSection\nfrom ..physics.Constants import k_B, cm_per_kpc, s_per_myr, m_H, mH_amu, \\\n mHe_amu\n\nclass fake_chianti:\n def __init__(self):\n pass\n\n def z2element(self, i):\n if i == 1:\n return 'h'\n elif i == 2:\n return 'he'\n\n def element2z(self, name):\n if name == 'h':\n return 1\n elif name == 'he':\n return 2 \n\n def zion2name(self, Z, i):\n if Z == 1:\n if i == 1:\n return 'h_1'\n elif i == 2:\n return 'h_2'\n elif Z == 2:\n if i == 1:\n return 'he_1'\n elif i == 2:\n return 'he_2'\n elif i == 3:\n return 'he_3' \n\n def convertName(self, species):\n element, i = species.split('_')\n \n Z = self.element2z(element)\n \n tmp = {}\n tmp['Element'] = element\n tmp['Ion'] = self.zion2name(Z, int(i))\n tmp['Z'] = self.element2z(element)\n\n return tmp\n \nutil = fake_chianti()\n\ntiny_number = 1e-8 # A relatively small species fraction\n\nclass Grid(object):\n def __init__(self, **kwargs):\n \"\"\"\n Initialize grid object.\n \n Parameters\n ----------\n dims : int\n Number of resolution elements in grid.\n length_units : float\n Size of domain in centimeters.\n start_radius : float\n Radius (in code units) within which to ignore.\n \n \"\"\"\n \n self.pf = ParameterFile(**kwargs)\n \n self.dims = int(self.pf['grid_cells'])\n self.length_units = self.pf['length_units']\n self.start_radius = self.pf['start_radius']\n self.approx_Salpha = self.pf['approx_Salpha']\n self.log_grid = self.pf['logarithmic_grid']\n\n # Compute cell centers and edges\n if self.pf['logarithmic_grid']:\n self.r_edg = self.r = \\\n np.logspace(np.log10(self.R0), np.log10(self.length_units), \n self.dims + 1) \n else:\n self.r_edg = self.r = \\\n np.linspace(self.R0, self.length_units, self.dims + 1)\n \n # Compute interior cell walls, spacing, and mid-points \n self.r_int = self.r_edg[0:-1]\n self.dr = np.diff(self.r_edg)\n self.r_mid = rebin(self.r_edg)\n \n self.zi = 0\n \n # Override, to set ICs by cosmology\n self.cosmological_ics = self.pf['cosmological_ics']\n \n @property\n def zeros_absorbers(self):\n return np.zeros(self.N_absorbers)\n \n @property\n def zeros_absorbers2(self):\n return np.zeros([self.N_absorbers] * 2) \n \n @property\n def zeros_grid_x_absorbers(self):\n return np.zeros([self.dims, self.N_absorbers])\n \n @property\n def zeros_grid_x_absorbers2(self):\n return np.zeros([self.dims, self.N_absorbers, self.N_absorbers]) \n \n @property\n def R0(self):\n \"\"\" Start radius in length_units. \"\"\"\n return self.start_radius * self.length_units\n \n @property\n def Vsh(self):\n \"\"\" Shell volume in length_units**3. \"\"\"\n if not hasattr(self, '_Vsh_all'):\n self._Vsh_all = self.ShellVolume(self.r_edg[0:-1], self.dr)\n \n return self._Vsh_all\n \n @property \n def neutrals(self):\n \"\"\" Return list of all neutral species. \"\"\" \n if not hasattr(self, '_neutral_species'):\n self._neutral_species = []\n for element in self.elements:\n self._neutral_species.append('%s_1' % element)\n\n return self._neutral_species\n \n @property \n def ions(self):\n \"\"\" Return list of all ionized species. 
\"\"\" \n if not hasattr(self, '_ionized_species'):\n neutrals = self.neutrals\n self._ionized_species = []\n for ion in self.all_ions:\n if ion in neutrals:\n continue\n \n self._ionized_species.append(ion)\n \n return self._ionized_species\n \n @property\n def absorbers(self): \n \"\"\" Return list of absorbers (don't include electrons). \"\"\"\n if not hasattr(self, '_absorbing_species'):\n self._absorbing_species = copy.copy(self.neutrals)\n for parent in self.ions_by_parent:\n self._absorbing_species.extend(self.ions_by_parent[parent][1:-1])\n\n return self._absorbing_species\n \n @property\n def N_absorbers(self):\n \"\"\" Return number of absorbing species. \"\"\"\n if not hasattr(self, 'self._num_of_absorbers'):\n absorbers = self.absorbers\n self._num_of_absorbers = int(len(absorbers))\n \n return self._num_of_absorbers\n \n @property\n def species_abundances(self):\n \"\"\"\n Return dictionary containing abundances of parent\n elements of all ions.\n \"\"\"\n if not hasattr(self, '_species_abundances'):\n self._species_abundances = {}\n for ion in self.ions_by_parent:\n for state in self.ions_by_parent[ion]:\n self._species_abundances[state] = \\\n self.element_abundances[self.elements.index(ion)]\n \n return self._species_abundances\n \n @property\n def species(self):\n if not hasattr(self, '_species'):\n self._species = []\n for parent in self.ions_by_parent:\n for ion in self.ions_by_parent[parent]:\n self._species.append(ion)\n \n return self._species\n \n @property\n def types(self):\n \"\"\"\n Return list (matching evolving_fields) with integers describing\n species type:\n 0 = neutral\n +1 = ion\n -1 = other\n \"\"\"\n \n if not hasattr(self, '_species_types'):\n self._species_types = []\n for species in self.evolving_fields:\n if species in self.neutrals:\n self._species_types.append(0)\n elif species in self.ions:\n self._species_types.append(1)\n else:\n self._species_types.append(-1) \n \n self._species_types = np.array(self._species_types) \n \n return self._species_types \n \n @property\n def ioniz_thresholds(self):\n \"\"\"\n Return dictionary containing ionization threshold energies (in eV)\n for all absorbers.\n \"\"\" \n \n if not hasattr(self, '_ioniz_thresholds'):\n self._ioniz_thresholds = {}\n #for absorber in self.absorbers:\n #if absorber == 'h_1':\n self._ioniz_thresholds['h_1'] = 13.6\n #elif absorber == 'he_1':\n self._ioniz_thresholds['he_1'] = 24.4\n #elif absorber == 'he_2':\n self._ioniz_thresholds['he_2'] = 54.4\n \n return self._ioniz_thresholds\n \n @property\n def bf_cross_sections(self):\n \"\"\"\n Return dictionary containing functions that compute the bound-free \n absorption cross-sections for all absorbers.\n \"\"\" \n \n if not hasattr(self, 'all_xsections'):\n self._bf_xsections = {}\n #for absorber in self.absorbers:\n #ion = cc.continuum(absorber)\n #ion.vernerCross(energy = np.logspace(1, 5, 1000))\n #if absorber == 'h_1':\n self._bf_xsections['h_1'] = lambda E: \\\n PhotoIonizationCrossSection(E, species=0)\n #elif absorber == 'he_1':\n self._bf_xsections['he_1'] = lambda E: \\\n PhotoIonizationCrossSection(E, species=1)\n #elif absorber == 'he_2':\n self._bf_xsections['he_2'] = lambda E: \\\n PhotoIonizationCrossSection(E, species=2) \n \n return self._bf_xsections\n \n @property\n def x_to_n(self):\n \"\"\"\n Return dictionary containing conversion factor between species\n fraction and number density for all species.\n \"\"\"\n if not hasattr(self, '_x_to_n_converter'):\n self._x_to_n_converter = {}\n for ion in self.all_ions:\n 
self._x_to_n_converter[ion] = self.n_ref \\\n * self.species_abundances[ion] \n \n return self._x_to_n_converter\n \n @property\n def expansion(self):\n if not hasattr(self, '_expansion'):\n self.set_physics()\n return self._expansion\n \n @property\n def isothermal(self):\n if not hasattr(self, '_isothermal'):\n self.set_physics()\n return self._isothermal\n \n @property\n def secondary_ionization(self):\n if not hasattr(self, '_secondary_ionization'):\n self.set_physics()\n return self._secondary_ionization\n \n @property\n def compton_scattering(self):\n if not hasattr(self, '_compton_scattering'):\n self.set_physics()\n return self._compton_scattering\n \n @property\n def recombination(self):\n if not hasattr(self, '_recombination'):\n self.set_physics()\n return self._recombination\n \n @property\n def collisional_ionization(self):\n if not hasattr(self, '_collisional_ionization'):\n self.set_physics()\n return self._collisional_ionization \n \n @property\n def clumping_factor(self):\n if not hasattr(self, '_clumping_factor'):\n self.set_physics()\n return self._clumping_factor\n \n @property\n def hydr(self):\n if not hasattr(self, '_hydr'):\n self._hydr = Hydrogen(self.cosm, **self.pf)\n return self._hydr \n \n @property\n def cosm(self):\n if not hasattr(self, '_cosm'):\n self._cosm = Cosmology()\n return self._cosm \n \n def set_properties(self, **kwargs):\n \"\"\"\n Initialize grid properties all in one go.\n \"\"\" \n\n self.set_physics(\n isothermal=kwargs['isothermal'], \n compton_scattering=kwargs['compton_scattering'],\n secondary_ionization=kwargs['secondary_ionization'], \n expansion=kwargs['expansion'],\n recombination=kwargs['recombination'],\n clumping_factor=kwargs['clumping_factor'],\n collisional_ionization=kwargs['collisional_ionization']\n )\n\n self.set_cosmology(\n initial_redshift=kwargs['initial_redshift'], \n omega_m_0=kwargs[\"omega_m_0\"], \n omega_l_0=kwargs[\"omega_l_0\"], \n omega_b_0=kwargs[\"omega_b_0\"], \n hubble_0=kwargs[\"hubble_0\"], \n helium_by_number=kwargs['helium_by_number'], \n cmb_temp_0=kwargs[\"cmb_temp_0\"],\n approx_highz=kwargs[\"approx_highz\"])\n\n self.set_chemistry(kwargs['include_He'])\n self.set_density(kwargs['density_units'])\n self.set_ionization(kwargs['initial_ionization'])\n self.set_temperature(kwargs['initial_temperature'])\n\n def set_physics(self, isothermal=False, compton_scattering=False,\n secondary_ionization=0, expansion=False, recombination='B',\n clumping_factor=1.0, collisional_ionization=True):\n self._isothermal = isothermal\n self._compton_scattering = compton_scattering\n self._secondary_ionization = secondary_ionization\n self._expansion = expansion\n self._recombination = recombination\n self._collisional_ionization = collisional_ionization\n\n if type(clumping_factor) is not types.FunctionType:\n self._clumping_factor = lambda z: clumping_factor\n else:\n self._clumping_factor = clumping_factor\n \n if self._expansion:\n self.set_cosmology()\n \n @property\n def is_cgm_patch(self): \n if not hasattr(self, '_is_cgm_patch'):\n self.set_recombination_rate()\n \n return self._is_cgm_patch\n \n def set_recombination_rate(self, is_cgm_patch=False):\n self._is_cgm_patch = is_cgm_patch \n \n def set_cosmology(self, initial_redshift=1e3, omega_m_0=0.272, \n omega_l_0=0.728, omega_b_0=0.044, hubble_0=0.702, \n helium_by_number=None, helium_by_mass=0.2454, cmb_temp_0=2.725, \n approx_highz=False):\n \n self.zi = initial_redshift\n self._cosm = Cosmology(omega_m_0=omega_m_0, \n omega_l_0=omega_l_0, 
omega_b_0=omega_b_0,\n hubble_0=hubble_0, \n helium_by_mass=helium_by_mass,\n cmb_temp_0=cmb_temp_0, \n approx_highz=approx_highz) \n \n def set_chemistry(self, include_He=False):\n \"\"\"\n Initialize chemistry.\n \n This routine sets the chemical composition of the medium being \n simulated.\n \n Parameters\n ----------\n include_He : bool\n Solve for helium?\n\n Example\n -------\n grid = Grid(dims=32)\n grid.set_chemistry() # H-only\n \n \"\"\" \n \n self.Z = [1] \n self.abundances = [1.]\n if include_He:\n self.Z.append(2)\n self.abundances.append(self.cosm.helium_by_number)\n \n self.Z = np.array(self.Z)\n self.ions_by_parent = {} # Ions sorted by parent element in dictionary\n self.parents_by_ion = {} # From ion name, determine parent element\n self.elements = [] # Just a list of element names\n self.all_ions = [] # All ion species \n self.evolving_fields = []# Anything with an ODE we'll later solve\n \n for i, element in enumerate(self.Z):\n element_name = util.z2element(element)\n \n self.ions_by_parent[element_name] = []\n self.elements.append(element_name)\n for ion in xrange(element + 1):\n name = util.zion2name(element, ion + 1)\n self.all_ions.append(name)\n self.ions_by_parent[element_name].append(name)\n self.parents_by_ion[name] = element_name\n self.evolving_fields.append(name)\n\n self.solve_ge = False \n self.evolving_fields.append('e')\n if not self.isothermal:\n self.evolving_fields.append('Tk')\n\n # Create blank data fields \n if not hasattr(self, 'data'): \n self.data = {}\n for field in self.evolving_fields:\n self.data[field] = np.zeros(self.dims)\n \n self.abundances_by_number = self.abundances\n self.element_abundances = [1.0]\n if include_He:\n self.element_abundances.append(self.cosm.helium_by_number)\n \n # Initialize mapping between q-vector and physical quantities (dengo) \n self._set_qmap()\n\n def set_density(self, nH=None):\n \"\"\"\n Initialize hydrogen number density.\n \n Setting the gas density is necessary for computing the hydrogen \n number density, which normalizes fractional abundances of elements\n to proper number densities of all species.\n\n Parameters\n ----------\n rho0 : float, array\n Density of medium in g / cm**3. Can be a float (uniform medium),\n or an array of values the same size as the grid itself.\n \n \"\"\"\n \n if self.cosmological_ics:\n self.n_H = self.cosm.nH(self.zi) \n elif isinstance(nH, Iterable): \n self.n_H = nH\n else:\n self.n_H = nH * np.ones(self.dims) \n \n if 2 in self.Z:\n self.n_He = self.n_H * self.abundances[1]\n else:\n self.n_He = 0.0 \n \n self.n_ref = self.n_H \n \n self.data['rho'] = m_H * (self.n_H * mH_amu + self.n_He * mHe_amu)\n \n def set_temperature(self, T0):\n \"\"\"\n Set initial temperature in grid. \n \n Parameters\n ----------\n T0 : float, array\n Initial temperature in grid. Can be constant value (corresponding\n to uniform medium), or an array of values like the grid.\n \"\"\"\n \n if self.cosmological_ics:\n Tgas = self.cosm.Tgas(self.zi)\n if isinstance(T0, Iterable):\n self.data['Tk'] = np.array(Tgas)\n else:\n self.data['Tk'] = Tgas * np.ones(self.dims)\n elif isinstance(T0, Iterable):\n self.data['Tk'] = np.array(T0)\n else:\n self.data['Tk'] = T0 * np.ones(self.dims)\n \n def set_ionization(self, x=None):\n \"\"\"\n Set initial ionization state. \n \n Parameters\n ----------\n x : float, list\n Initial ionization state for all species. 
Must be a 1:1 mapping\n between values in this list and values in self.species.\n \n \"\"\" \n \n if x is not None:\n\n for j, species in enumerate(self.species):\n element, state = species.split('_')\n Z = util.element2z(element)\n i = int(state)\n \n name = util.zion2name(Z, i)\n self.data[name].fill(x[j])\n \n # Otherwise assume neutral\n else:\n for sp in self.ions:\n self.data[sp].fill(1e-8)\n for sp in self.neutrals:\n self.data[sp].fill(1.0 - 1e-8)\n \n # Set electron density\n self._set_electron_fraction()\n \n if self.solve_ge:\n self.set_gas_energy()\n \n def set_ics(self, data):\n \"\"\"\n Simple way of setting all initial conditions at once with a data \n dictionary.\n \"\"\"\n \n self.data = {}\n for key in data.keys():\n if type(data[key]) is float:\n self.data[key] = data[key]\n continue\n \n self.data[key] = data[key].copy()\n \n def create_slab(self, **kwargs):\n \"\"\" Create a slab. \"\"\"\n \n if not kwargs['slab']:\n return \n \n # Figure out where the clump is\n gridarr = np.linspace(0, 1, self.dims)\n isslab = (gridarr >= (kwargs['slab_position'] - kwargs['slab_radius'])) \\\n & (gridarr <= (kwargs['slab_position'] + kwargs['slab_radius']))\n \n # First, modify density and temperature\n if kwargs['slab_profile'] == 0:\n self.data['rho'][isslab] *= kwargs['slab_overdensity']\n self.n_H[isslab] *= kwargs['slab_overdensity']\n self.data['Tk'][isslab] = kwargs['slab_temperature']\n else:\n raise NotImplemented('only know uniform slabs')\n \n # Ionization state - could generalize this more\n for j, species in enumerate(self.species):\n element, state = species.split('_')\n Z = util.element2z(element)\n i = int(state)\n \n name = util.zion2name(Z, i)\n self.data[name][isslab] = np.ones(isslab.sum()) \\\n * kwargs['slab_ionization'][j]\n \n # Reset electron density, particle density, and gas energy\n self._set_electron_fraction()\n \n if hasattr(self, '_x_to_n_converter'): \n del self._x_to_n_converter\n \n def _set_electron_fraction(self):\n \"\"\"\n Set electron density - must have run set_density beforehand.\n \"\"\"\n \n self.data['e'] = np.zeros(self.dims)\n for i, Z in enumerate(self.Z):\n for j in np.arange(1, 1 + Z): # j = number of electrons donated by ion j + 1\n x_i_jp1 = self.data[util.zion2name(Z, j + 1)]\n self.data['e'] += j * x_i_jp1 * self.n_ref \\\n * self.element_abundances[i] \n \n self.data['e'] /= self.n_H \n \n def particle_density(self, data, z=0):\n \"\"\"\n Compute total particle number density.\n \"\"\" \n \n n = data['e'].copy()\n #for ion in self.all_ions:\n # n += data[ion] * self.x_to_n[ion] * (1. + z)**3 \\\n # / (1. + self.zi)**3\n \n if self.expansion:\n n *= self.cosm.nH(z)\n n += self.cosm.nH(z)\n \n if 2 in self.Z:\n n += self.cosm.nHe(z)\n \n else:\n n *= self.n_H\n \n n += self.n_H\n \n if 2 in self.Z:\n n += self.n_H * self.cosm.helium_by_number\n \n return n \n \n def electron_fraction(self, data, z):\n de = np.zeros(self.dims)\n for i, Z in enumerate(self.Z):\n for j in np.arange(1, 1 + Z): # j = number of electrons donated by ion j + 1\n x_i_jp1 = data[util.zion2name(Z, j + 1)]\n de += j * x_i_jp1 * self.n_ref * (1. + z)**3 / (1. + self.zi)**3 \\\n * self.element_abundances[i]\n\n return de / self.n_H\n\n def ColumnDensity(self, data):\n \"\"\" Compute column densities for all absorbing species. 
\"\"\" \n \n N = {}\n Nc = {}\n logN = {}\n for absorber in self.absorbers:\n Nc[absorber] = self.dr * data[absorber] * self.x_to_n[absorber] \n N[absorber] = np.cumsum(Nc[absorber])\n logN[absorber] = np.log10(N[absorber])\n \n return N, logN, Nc\n\n def _set_qmap(self):\n \"\"\"\n The vector 'q' is an array containing the values of all ion fractions and the\n gas energy. This routine sets up the mapping between elements in q and the\n corrresponding physical quantities.\n \n Will be in order of increasing Z, then de, then ge.\n \"\"\"\n \n self.qmap = []\n for species in self.evolving_fields:\n self.qmap.append(species)\n \n def ShellVolume(self, r, dr):\n \"\"\"\n Return volume of shell at distance r, thickness dr.\n \"\"\"\n \n return 4. * np.pi * ((r + dr)**3 - r**3) / 3. \n\n \n\n ", "\"\"\"\n\nIntergalacticMedium.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Fri May 24 11:31:06 2013\n\nDescription: \n\n\"\"\"\n\nimport numpy as np\nfrom ..util.Warnings import *\nfrom ..util import ProgressBar\nfrom ..physics.Constants import *\nimport types, os, re, sys, pickle\nfrom ..util.Misc import num_freq_bins\nfrom ..physics import SecondaryElectrons\nfrom scipy.integrate import dblquad, romb, simps, quad, trapz\nfrom ..util.Warnings import tau_tab_z_mismatch, tau_tab_E_mismatch\n\ntry:\n import h5py\n have_h5py = True \nexcept ImportError:\n have_h5py = False\n\ntry:\n from mpi4py import MPI\n rank = MPI.COMM_WORLD.rank\n size = MPI.COMM_WORLD.size\nexcept ImportError:\n rank = 0\n size = 1\n\nlog10 = np.log(10.)\nE_th = np.array([13.6, 24.4, 54.4])\n\ndefkwargs = \\\n{\n 'zf':None, \n 'xray_flux':None, \n 'epsilon_X': None,\n 'Gamma': None,\n 'gamma': None,\n 'return_rc': False, \n 'energy_units':False, \n 'Emax': None,\n #'zxavg':0.0,\n #'igm':True,\n 'xavg': 0.0,\n 'igm_h_1': 1.0,\n 'igm_h_2': 0.0,\n 'igm_he_2': 0.0,\n 'igm_he_3': 0.0,\n 'cgm_h_1': 1.0,\n 'cgm_h_2': 0.0,\n 'cgm_he_2': 0.0,\n 'cgm_he_3': 0.0,\n 'igm_e': 0.0,\n}\n\nspecies_i_to_str = {0:'h_1', 1:'he_1', 2:'he_2'}\n\nclass GlobalVolume(object):\n def __init__(self, background):\n \"\"\"\n Initialize a GlobalVolume.\n \n Parameters\n ----------\n background : ares.solvers.UniformBackground instance.\n \n \"\"\"\n\n self.background = background\n self.pf = background.pf\n self.grid = background.grid\n self.cosm = background.cosm\n self.hydr = background.hydr\n self.pops = background.pops\n self.Npops = len(self.pops)\n \n # Include helium opacities approximately?\n self.approx_He = self.pf['include_He'] and self.pf['approx_He']\n \n # Include helium opacities self-consistently?\n self.self_consistent_He = self.pf['include_He'] \\\n and (not self.pf['approx_He'])\n\n self.esec = \\\n SecondaryElectrons(method=self.pf[\"secondary_ionization\"]) \n\n # Choose function for computing bound-free absorption cross-sections \n if self.pf['approx_sigma']:\n from ..physics.CrossSections import \\\n ApproximatePhotoIonizationCrossSection as sigma\n else:\n from ..physics.CrossSections import \\\n PhotoIonizationCrossSection as sigma\n\n self.sigma = sigma\n self.sigma0 = sigma(E_th[0]) # Hydrogen ionization threshold\n\n self._set_integrator()\n\n @property\n def rates_no_RT(self):\n if not hasattr(self, '_rates_no_RT'):\n self._rates_no_RT = \\\n {'k_ion': np.zeros((self.grid.dims,\n self.grid.N_absorbers)),\n 'k_heat': np.zeros((self.grid.dims,\n self.grid.N_absorbers)),\n 'k_ion2': np.zeros((self.grid.dims,\n self.grid.N_absorbers, self.grid.N_absorbers)),\n }\n\n return self._rates_no_RT\n\n 
#def _fetch_tau(self, pop, zpf, Epf):\n # \"\"\"\n # Look for optical depth tables. Supply corrected energy and redshift\n # arrays if there is a mistmatch between those generated from information\n # in the parameter file and those found in the optical depth table.\n # \n # .. note:: This will only be called from UniformBackground, and on\n # populations which are using the generator framework.\n # \n # Parameters\n # ----------\n # popid : int\n # ID # for population of interest.\n # zpf : np.ndarray\n # What the redshifts should be according to the parameter file. \n # Epf : np.ndarray\n # What the energies should be according to the parameter file.\n # \n # Returns\n # -------\n # Energies and redshifts, potentially revised from Epf and zpf.\n # \n # \"\"\"\n # \n # for i in range(self.Npops):\n # if pop == self.pops[i]:\n # band = self.background.bands_by_pop[i]\n # break\n # \n # # First, look in CWD or $ARES (if it exists)\n # self.tabname = self._load_tau(pop, pop.pf['tau_prefix'])\n # \n # if not self.tabname:\n # return zpf, Epf, None\n # \n # # If we made it this far, we found a table that may be suitable\n # ztab, Etab, tau = self._read_tau(self.tabname)\n # \n # # Return right away if there's no potential for conflict\n # if (zpf is None) and (Epf is None):\n # return ztab, Etab, tau\n # \n # # Figure out if the tables need fixing \n # zmax_ok = \\\n # (ztab.max() >= zpf.max()) or \\\n # np.allclose(ztab.max(), zpf.max())\n # zmin_ok = \\\n # (ztab.min() <= zpf.min()) or \\\n # np.allclose(ztab.min(), zpf.min())\n # \n # Emin_ok = \\\n # (Etab.min() <= Epf.min()) or \\\n # np.allclose(Etab.min(), Epf.min())\n # \n # # Results insensitive to Emax (so long as its relatively large)\n # # so be lenient with this condition (100 eV or 1% difference\n # # between parameter file and lookup table)\n # Emax_ok = np.allclose(Etab.max(), Epf.max(), atol=100., rtol=1e-2)\n # \n # # Check redshift bounds\n # if not (zmax_ok and zmin_ok):\n # if not zmax_ok:\n # tau_tab_z_mismatch(self, zmin_ok, zmax_ok, ztab)\n # sys.exit(1)\n # else:\n # if self.pf['verbose']:\n # tau_tab_z_mismatch(self, zmin_ok, zmax_ok, ztab)\n # \n # if not (Emax_ok and Emin_ok):\n # if self.pf['verbose']:\n # tau_tab_E_mismatch(pop, self.tabname, Emin_ok, Emax_ok, Etab)\n # \n # if Etab.max() < Epf.max():\n # sys.exit(1)\n # \n # # Correct for inconsistencies between parameter file and table\n # # By effectively masking out those elements with tau -> inf\n # if Epf.min() > Etab.min():\n # Ediff = Etab - Epf.min()\n # i_E0 = np.argmin(np.abs(Ediff))\n # if Ediff[i_E0] < 0:\n # i_E0 += 1\n # \n # #tau[:,0:i_E0+1] = np.inf\n # else:\n # i_E0 = 0\n # \n # if Epf.max() < Etab.max():\n # Ediff = Etab - Epf.max()\n # i_E1 = np.argmin(np.abs(Ediff))\n # if Ediff[i_E1] < 0:\n # i_E1 += 1\n # \n # #tau[:,i_E1+1:] = np.inf\n # else:\n # i_E1 = None\n # \n # # We're done!\n # return ztab, Etab[i_E0:i_E1], tau[:,i_E0:i_E1]\n\n @property\n def E(self):\n if not hasattr(self, '_E'):\n self._tabulate_atomic_data()\n \n return self._E\n\n @property\n def sigma_E(self):\n if not hasattr(self, '_sigma_E'):\n self._tabulate_atomic_data()\n \n return self._sigma_E\n \n def _tabulate_atomic_data(self):\n \"\"\"\n Pre-compute cross sections and such for each source population.\n \n Returns\n -------\n Nothing. 
Sets the following attributes:\n \n sigma_E\n log_sigma_E\n fheat, flya, fion\n \n \"\"\"\n\n # Remember: these will all be [Npops, Nbands/pop, Nenergies/band]\n self._E = self.background.energies\n self.logE = [[] for k in range(self.Npops)]\n self.dlogE = [[] for k in range(self.Npops)]\n self.fheat = [[] for k in range(self.Npops)]\n self.flya = [[] for k in range(self.Npops)]\n \n # These are species dependent\n self._sigma_E = {}\n self.fion = {}\n for species in ['h_1', 'he_1', 'he_2']:\n self._sigma_E[species] = [[] for k in range(self.Npops)]\n self.fion[species] = [[] for k in range(self.Npops)]\n \n ##\n # Note: If secondary_ionization > 1, there will be an ionized fraction\n # dimension in fion and fheat.\n ## \n \n # Loop over populations\n for i, pop in enumerate(self.pops):\n \n # This means the population is completely approximate\n if not np.any(self.background.solve_rte[i]):\n self.logE[i] = [None]\n self.dlogE[i] = [None]\n self.fheat[i] = [None]\n self.flya[i] = [None]\n \n for species in ['h_1', 'he_1', 'he_2']:\n self.fion[species][i] = [None]\n self._sigma_E[species][i] = [None]\n \n continue\n \n ##\n # If we make it here, the population has at least one band that\n # requires a detailed solution to the RTE \n ##\n \n Nbands = len(self.background.energies[i])\n \n self.logE[i] = [None for k in range(Nbands)]\n self.dlogE[i] = [None for k in range(Nbands)]\n self.fheat[i] = [None for k in range(Nbands)]\n self.flya[i] = [None for k in range(Nbands)]\n for species in ['h_1', 'he_1', 'he_2']:\n self.fion[species][i] = [None for k in range(Nbands)]\n self._sigma_E[species][i] = [None for k in range(Nbands)]\n\n # Loop over each band for this population\n for j, band in enumerate(self.background.bands_by_pop[i]):\n\n if band is None:\n continue\n \n need_tab = self.pops[i].is_xray_src \\\n and np.any(np.array(band) > E_LL)\n \n if (not self.background.solve_rte[i][j]) or \\\n (not need_tab):\n continue\n else: \n self.fheat[i][j] = \\\n [np.ones([self.background.energies[i][j].size, \n len(self.esec.x)]) \\\n for j in range(Nbands)]\n self.flya[i] = \\\n [np.ones([self.background.energies[i][j].size, \n len(self.esec.x)]) \\\n for j in range(Nbands)]\n \n for species in ['h_1', 'he_1', 'he_2']:\n if self.esec.method > 1:\n self._sigma_E[species][i] = \\\n [np.ones([self.background.energies[i][j].size, \n len(self.esec.x)]) \\\n for j in range(Nbands)]\n self.fion[species][i] = \\\n [np.ones([self.background.energies[i][j].size, \n len(self.esec.x)]) \\\n for j in range(Nbands)]\n\n else:\n self._sigma_E[species][i] = [None for k in range(Nbands)]\n self.fion[species][i] = [None for k in range(Nbands)]\n self.fheat[i] = [None for k in range(Nbands)]\n self.flya[i] = [None for k in range(Nbands)] \n \n # More convenient variables\n E = self._E[i][j]\n N = E.size\n\n # Compute some things we need, like bound-free cross-section\n self.logE[i][j] = np.log10(E)\n self.dlogE[i][j] = np.diff(self.logE[i][j])\n \n # \n for k, species in enumerate(['h_1', 'he_1', 'he_2']):\n self._sigma_E[species][i][j] = \\\n np.array(map(lambda E: self.sigma(E, k), E))\n\n # Pre-compute secondary ionization and heating factors\n if self.esec.method > 1:\n \n # Don't worry: we'll fill these in in a sec!\n self.fheat[i][j] = np.ones([N, len(self.esec.x)])\n self.flya[i][j] = np.ones([N, len(self.esec.x)])\n \n # Must evaluate at ELECTRON energy, not photon energy\n for k, nrg in enumerate(E - E_th[0]):\n self.fheat[i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, E=nrg, \n 
channel='heat')\n self.fion['h_1'][i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, E=nrg, \n channel='h_1')\n \n if self.pf['secondary_lya']:\n self.flya[i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, E=nrg, \n channel='lya') \n \n # Helium\n if self.pf['include_He'] and not self.pf['approx_He']:\n \n # Don't worry: we'll fill these in in a sec!\n self.fion['he_1'][i][j] = np.ones([N, len(self.esec.x)])\n self.fion['he_2'][i][j] = np.ones([N, len(self.esec.x)])\n \n for k, nrg in enumerate(E - E_th[1]):\n self.fion['he_1'][i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, \n E=nrg, channel='he_1')\n \n for k, nrg in enumerate(E - E_th[2]):\n self.fion['he_2'][i][j][k] = \\\n self.esec.DepositionFraction(self.esec.x, \n E=nrg, channel='he_2') \n \n else:\n self.fion['he_1'][i][j] = np.zeros([N, len(self.esec.x)])\n self.fion['he_2'][i][j] = np.zeros([N, len(self.esec.x)])\n \n \n \n \n \n return \n \n def _set_integrator(self):\n self.integrator = self.pf[\"unsampled_integrator\"]\n self.sampled_integrator = self.pf[\"sampled_integrator\"]\n self.rtol = self.pf[\"integrator_rtol\"]\n self.atol = self.pf[\"integrator_atol\"]\n self.divmax = int(self.pf[\"integrator_divmax\"])\n \n #def _read_tau(self, fn):\n # \"\"\" Read optical depth table. \"\"\"\n # \n # if type(fn) is dict:\n # \n # E0 = fn['E'].min()\n # E1 = fn['E'].max()\n # E = fn['E']\n # z = fn['z']\n # x = z + 1\n # N = E.size\n # \n # R = x[1] / self.x[0]\n # \n # tau = fn['tau']\n #\n # elif re.search('hdf5', fn):\n #\n # f = h5py.File(self.tabname, 'r')\n #\n # E0 = min(f['photon_energy'].value)\n # E1 = max(f['photon_energy'].value)\n # E = f['photon_energy'].value\n # z = f['redshift'].value\n # x = z + 1\n # N = E.size\n # \n # R = x[1] / x[0]\n # \n # tau = f['tau'].value\n # f.close()\n #\n # elif re.search('npz', fn) or re.search('pkl', fn): \n #\n # if re.search('pkl', fn):\n # f = open(fn, 'rb')\n # data = pickle.load(f)\n # else:\n # f = open(fn, 'r')\n # data = dict(np.load(f))\n # \n # E0 = data['E'].min()\n # E1 = data['E'].max() \n # E = data['E']\n # z = data['z']\n # x = z + 1\n # N = E.size\n # \n # R = x[1] / x[0]\n # \n # tau = tau = data['tau']\n # f.close()\n # else:\n # raise NotImplemented('Don\\'t know how to read %s.' 
% fn)\n #\n # return z, E, tau\n \n #def _tau_name(self, pop, suffix='hdf5'):\n # \"\"\"\n # Return name of table based on its properties.\n # \"\"\"\n #\n # if not have_h5py:\n # suffix == 'pkl'\n #\n # HorHe = 'He' if self.pf['include_He'] else 'H'\n #\n # zf = self.pf['final_redshift']\n # zi = self.pf['initial_redshift']\n #\n # L, N = self._tau_shape(pop)\n #\n # E0 = pop.pf['pop_Emin']\n # E1 = pop.pf['pop_Emax']\n #\n # fn = lambda z1, z2, E1, E2: \\\n # 'optical_depth_%s_%ix%i_z_%i-%i_logE_%.2g-%.2g.%s' \\\n # % (HorHe, L, N, z1, z2, E1, E2, suffix)\n #\n # return fn(zf, zi, np.log10(E0), np.log10(E1)), fn\n \n #def _load_tau(self, pop, prefix=None):\n # \"\"\"\n # Find an optical depth table.\n # \"\"\"\n # \n # fn, fn_func = self._tau_name(pop)\n #\n # if prefix is None:\n # ares_dir = os.environ.get('ARES')\n # if not ares_dir:\n # print \"No ARES environment variable.\"\n # return None\n # \n # input_dirs = [os.path.join(ares_dir,'input','optical_depth')]\n #\n # else:\n # if type(prefix) is str:\n # input_dirs = [prefix]\n # else:\n # input_dirs = prefix\n #\n # guess = os.path.join(input_dirs[0], fn)\n # if os.path.exists(guess):\n # return guess\n #\n # ## Find exactly what table should be\n # zmin, zmax, Nz, lEmin, lEmax, chem, pre, post = self._parse_tab(fn)\n #\n # ok_matches = []\n # perfect_matches = []\n # \n # # Loop through input directories\n # for input_dir in input_dirs:\n # \n # # Loop over files in input_dir, look for best match\n # for fn1 in os.listdir(input_dir):\n # \n # if re.search('hdf5', fn1) and (not have_h5py):\n # continue\n #\n # tab_name = os.path.join(input_dir, fn1)\n # \n # try:\n # zmin_f, zmax_f, Nz_f, lEmin_f, lEmax_f, chem_f, p1, p2 = \\\n # self._parse_tab(fn1)\n # except:\n # continue\n #\n # # Dealbreakers\n # if Nz_f != Nz:\n # continue\n # if zmax_f < zmax:\n # continue\n # if chem_f != chem:\n # continue\n #\n # # Continue with possible matches\n # for fmt in ['pkl', 'npz', 'hdf5']:\n #\n # if fn1 == fn and fmt == self.pf['preferred_format']:\n # perfect_matches.append(tab_name)\n # continue\n #\n # if c and fmt == self.pf['preferred_format']:\n # perfect_matches.append(tab_name)\n # continue\n #\n # # If number of redshift bins and energy range right...\n # if re.search(pre, fn1) and re.search(post, fn1):\n # if re.search(fmt, fn1) and fmt == self.pf['preferred_format']:\n # perfect_matches.append(tab_name)\n # else:\n # ok_matches.append(tab_name)\n # \n # # If number of redshift bins is right...\n # elif re.search(pre, fn1):\n # \n # if re.search(fmt, fn1) and fmt == self.pf['preferred_format']:\n # perfect_matches.append(tab_name)\n # else:\n # ok_matches.append(tab_name)\n # \n # if perfect_matches:\n # return perfect_matches[0]\n # elif ok_matches:\n # return ok_matches[0]\n # else:\n # return None\n \n #def _parse_tab(self, fn):\n # \n # tmp1, tmp2 = fn.split('_z_')\n # pre = tmp1[0:tmp1.rfind('x')]\n # red, tmp3 = fn.split('_logE_')\n # post = '_logE_' + tmp3.replace('.hdf5', '')\n # \n # # Find exactly what table should be\n # zmin, zmax = map(float, red[red.rfind('z')+2:].partition('-')[0::2])\n # logEmin, logEmax = map(float, tmp3[tmp3.rfind('E')+1:tmp3.rfind('.')].partition('-')[0::2])\n # \n # Nz = pre[pre.rfind('_')+1:]\n # \n # # Hack off Nz string and optical_depth_\n # chem = pre.strip(Nz)[14:-1]#.strip('optical_depth_')\n # \n # return zmin, zmax, int(Nz), logEmin, logEmax, chem, pre, post\n # \n #def _tau_shape(self, pop):\n # \"\"\"\n # Determine dimensions of optical depth table.\n # \n # Unfortunately, this is a 
bit redundant with the procedure in\n # self._init_xrb, but that's the way it goes.\n # \"\"\"\n # \n # # Set up log-grid in parameter x = 1 + z\n # x = np.logspace(np.log10(1+self.pf['final_redshift']),\n # np.log10(1+self.pf['initial_redshift']),\n # int(pop.pf['pop_tau_Nz']))\n # z = x - 1.\n # logx = np.log10(x)\n # logz = np.log10(z)\n #\n # # Constant ratio between elements in x-grid\n # R = x[1] / x[0]\n # logR = np.log10(R)\n # \n # E0 = pop.pf['pop_Emin']\n # \n # # Create mapping to frequency space\n # E = 1. * E0\n # n = 1\n # while E < pop.pf['pop_Emax']:\n # E = E0 * R**(n - 1)\n # n += 1 \n # \n # # Set attributes for dimensions of optical depth grid\n # L = len(x)\n # \n # # Frequency grid must be index 1-based.\n # N = num_freq_bins(L, zi=self.pf['initial_redshift'], \n # zf=self.pf['final_redshift'], Emin=E0, \n # Emax=pop.pf['pop_Emax'])\n # N -= 1\n # \n # return L, N\n \n def RestFrameEnergy(self, z, E, zp):\n \"\"\"\n Return energy of a photon observed at (z, E) and emitted at zp.\n \"\"\"\n \n return E * (1. + zp) / (1. + z)\n \n def ObserverFrameEnergy(self, z, Ep, zp):\n \"\"\"\n What is the energy of a photon observed at redshift z and emitted \n at redshift zp and energy Ep?\n \"\"\"\n\n return Ep * (1. + z) / (1. + zp)\n\n def Jc(self, z, E):\n \"\"\"\n Flux corresponding to one photon per hydrogen atom at redshift z.\n \"\"\"\n\n return c * self.cosm.nH0 * (1. + z)**3 / 4. / np.pi \\\n / (E * erg_per_ev / h)\n\n def rate_to_coefficient(self, z, species=0, zone='igm', **kw):\n \"\"\"\n Convert an ionization/heating rate to a rate coefficient.\n \n Provides units of per atom.\n \"\"\"\n\n if self.pf['photon_counting']:\n prefix = zone\n else:\n prefix = 'igm'\n \n if species == 0: \n weight = 1. / self.cosm.nH(z) / kw['%s_h_1' % prefix]\n elif species == 1:\n weight = 1. / self.cosm.nHe(z) / kw['%s_he_1' % prefix]\n elif species == 2:\n weight = 1. / self.cosm.nHe(z) / kw['%s_he_2' % prefix]\n\n return weight\n\n def coefficient_to_rate(self, z, species=0, **kw):\n return 1. / self.rate_to_coefficient(z, species, **kw)\n\n def _fix_kwargs(self, functionify=False, popid=0, band=0, **kwargs):\n\n kw = defkwargs.copy()\n kw.update(kwargs)\n\n pop = self.pops[popid]\n\n if functionify and type(kw['xavg']) is not types.FunctionType:\n tmp = kw['xavg']\n kw['xavg'] = lambda z: tmp\n\n if kw['zf'] is None and pop is not None:\n kw['zf'] = pop.zform\n \n if not self.background.solve_rte[popid][band]:\n pass\n elif (kw['Emax'] is None) and self.background.solve_rte[popid][band] and \\\n np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX']):\n \n kw['Emax'] = self.background.energies[popid][band][-1]\n \n return kw\n \n def HeatingRate(self, z, species=0, popid=0, band=0, **kwargs):\n \"\"\"\n Compute heating rate density due to emission from this population. \n \n Parameters\n ----------\n z : int, float\n Redshift of interest.\n species : int\n Atom whose liberated electrons cause heating.\n Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)\n \n ===============\n relevant kwargs\n ===============\n xray_flux : np.ndarray\n Array of fluxes corresponding to photon energies in self.igm.E.\n return_rc : bool\n Return actual heating rate, or rate coefficient for heating?\n Former has units of erg s**-1 cm**-3, latter has units of \n erg s**-1 cm**-3 atom**-1. 
\n \n Returns\n -------\n Proper heating rate density in units of in erg s**-1 cm**-3 at redshift z,\n due to electrons previously bound to input species.\n\n \"\"\"\n \n pop = self.pops[popid]\n \n if not pop.pf['pop_heat_src_igm'] or (z >= pop.zform):\n return 0.0 \n \n if pop.pf['pop_heat_rate'] is not None:\n return pop.HeatingRate(z)\n \n # Grab defaults, do some patches if need be \n kw = self._fix_kwargs(**kwargs)\n \n species_str = species_i_to_str[species]\n\n if pop.pf['pop_k_heat_igm'] is not None:\n return pop.pf['pop_k_heat_igm'](z)\n \n if band is not None:\n solve_rte = self.background.solve_rte[popid][band]\n else:\n solve_rte = False \n \n # Compute fraction of photo-electron energy deposited as heat\n if pop.pf['pop_fXh'] is None:\n \n # Interpolate in energy and ionized fraction\n if (self.esec.method > 1) and solve_rte:\n if kw['igm_e'] <= self.esec.x[0]:\n fheat = self.fheat[popid][band][:,0]\n else:\n i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))\n if self.esec.x[i_x] > kw['igm_e']:\n i_x -= 1\n \n j = i_x + 1 \n \n fheat = self.fheat[popid][band][:,i_x] \\\n + (self.fheat[popid][band][:,j] - self.fheat[popid][band][:,i_x]) \\\n * (kw['igm_e'] - self.esec.x[i_x]) \\\n / (self.esec.x[j] - self.esec.x[i_x]) \n elif self.esec.method > 1:\n raise ValueError('Only know how to do advanced secondary ionization with solve_rte=True')\n else:\n fheat = self.esec.DepositionFraction(kw['igm_e'])[0]\n\n else:\n fheat = pop.pf['pop_fXh']\n \n # Assume heating rate density at redshift z is only due to emission\n # from sources at redshift z\n if not solve_rte:\n weight = self.rate_to_coefficient(z, species, **kw)\n \n Lx = pop.LuminosityDensity(z, Emin=pop.pf['pop_Emin_xray'], \n Emax=pop.pf['pop_Emax'])\n \n return weight * fheat * Lx * (1. 
+ z)**3\n \n ##\n # Otherwise, do the full calculation\n ##\n \n # Re-normalize to help integrator\n norm = J21_num * self.sigma0\n \n # Computes excess photo-electron energy due to ionizations by\n # photons with energy E (normalized by sigma0 * Jhat)\n if kw['fluxes'][popid] is None:\n\n # If we're approximating helium, must add contributions now\n # since we'll never explicitly call this method w/ species=1.\n if self.approx_He:\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg']) \\\n * (self.sigma(E) * (E - E_th[0]) \\\n + self.cosm.y * self.sigma(E, species=1) * (E - E_th[1])) \\\n * fheat / norm / ev_per_hz\n \n # Otherwise, just heating via hydrogen photo-electrons\n else:\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'], \n zxavg=kw['zxavg']) * self.sigma(E, species=1) \\\n * (E - E_th[species]) * fheat / norm / ev_per_hz\n \n # This means the fluxes have been computed already - integrate\n # over discrete set of points\n else:\n \n integrand = self.sigma_E[species_str][popid][band] \\\n * (self._E[popid][band] - E_th[species])\n\n if self.approx_He:\n integrand += self.cosm.y * self.sigma_E['he_1'][popid][band] \\\n * (self._E[popid][band] - E_th[1])\n \n integrand *= kw['fluxes'][popid][band] * fheat / norm / ev_per_hz\n \n # Compute integral over energy\n if type(integrand) == types.FunctionType:\n heat, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0, \n lambda b: kw['Emax'], epsrel=self.rtol, epsabs=self.atol)\n else:\n if kw['Emax'] is not None:\n imax = np.argmin(np.abs(self._E[popid][band] - kw['Emax']))\n if imax == 0:\n return 0.0\n elif imax == (len(self._E[popid][band]) - 1): \n imax = None \n \n if self.sampled_integrator == 'romb':\n raise ValueError(\"Romberg's method cannot be used for integrating subintervals.\")\n heat = romb(integrand[0:imax] * self.E[0:imax], \n dx=self.dlogE[0:imax])[0] * log10\n else:\n heat = simps(integrand[0:imax] * self._E[popid][band][0:imax], \n x=self.logE[popid][band][0:imax]) * log10\n \n else:\n imin = np.argmin(np.abs(self._E[popid][band] - pop.pf['pop_Emin']))\n \n if self.sampled_integrator == 'romb':\n heat = romb(integrand[imin:] * self._E[popid][band][imin:], \n dx=self.dlogE[popid][band][imin:])[0] * log10\n elif self.sampled_integrator == 'trapz':\n heat = np.trapz(integrand[imin:] * self._E[popid][band][imin:], \n x=self.logE[popid][band][imin:]) * log10\n else:\n heat = simps(integrand[imin:] * self._E[popid][band][imin:], \n x=self.logE[popid][band][imin:]) * log10\n \n # Re-normalize, get rid of per steradian units\n heat *= 4. * np.pi * norm * erg_per_ev\n\n # Currently a rate coefficient, returned value depends on return_rc \n if kw['return_rc']:\n pass\n else:\n heat *= self.coefficient_to_rate(z, species, **kw)\n\n return heat \n \n def IonizationRateCGM(self, z, species=0, popid=0, band=0, **kwargs):\n \"\"\"\n Compute growth rate of HII regions.\n\n Parameters\n ----------\n z : float\n current redshift\n species : int\n Ionization rate for what atom?\n Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)\n \n ===============\n relevant kwargs\n ===============\n fluxes : np.ndarray\n Array of fluxes corresponding to photon energies in self.igm.E.\n return_rc : bool\n Return actual heating rate, or rate coefficient for heating?\n Former has units of erg s**-1 cm**-3, latter has units of \n erg s**-1 cm**-3 atom**-1. \n\n Returns\n -------\n Ionization rate. 
Units determined by value of return_rc keyword\n argument, which is False by default.\n\n \"\"\"\n \n pop = self.pops[popid]\n \n if band is not None:\n b = self.background.bands_by_pop[popid][band]\n if not np.any(np.array(b) > E_LL):\n return 0.0\n if not np.allclose(b[0], E_LL, atol=0.1, rtol=0):\n return 0.0\n else:\n b = [13.6, 24.6]\n \n if (not pop.pf['pop_ion_src_cgm']) or (z > pop.zform):\n return 0.0\n \n # Need some guidance from 1-D calculations to do this\n if species > 0:\n return 0.0\n\n if pop.pf['pop_ion_rate'] is not None:\n return pop.IonizationRateCGM(z) \n\n kw = defkwargs.copy()\n kw.update(kwargs)\n\n if pop.pf['pop_k_ion_cgm'] is not None:\n return self.pf['pop_k_ion_cgm'](z)\n\n if kw['return_rc']:\n weight = self.rate_to_coefficient(z, species, **kw)\n else:\n weight = 1.0\n \n Qdot = pop.PhotonLuminosityDensity(z, Emin=13.6, Emax=24.6)\n \n return weight * Qdot * (1. + z)**3\n \n def IonizationRateIGM(self, z, species=0, popid=0, band=0, **kwargs):\n \"\"\"\n Compute volume averaged hydrogen ionization rate.\n \n Parameters\n ----------\n z : float\n redshift\n species : int\n HI, HeI, or HeII (species=0, 1, 2, respectively)\n \n Returns\n -------\n Volume averaged ionization rate in units of ionizations per \n second. If return_rc=True, will be in units of ionizations per\n second per atom.\n \n \"\"\"\n\n pop = self.pops[popid]\n\n # z between zform, zdead? must be careful for BHs\n if (not pop.pf['pop_ion_src_igm']) or (z > pop.zform):\n return 0.0\n\n # Grab defaults, do some patches if need be\n kw = self._fix_kwargs(**kwargs)\n \n species_str = species_i_to_str[species]\n\n if pop.pf['pop_k_ion_igm'] is not None:\n return pop.pf['pop_k_ion_igm'](z)\n\n if band is not None:\n solve_rte = self.background.solve_rte[popid][band]\n else:\n solve_rte = False\n\n if (not solve_rte) or \\\n (not np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX'])):\n \n Lx = pop.LuminosityDensity(z, Emin=pop.pf['pop_Emin_xray'], \n Emax=pop.pf['pop_Emax'])\n \n weight = self.rate_to_coefficient(z, species, **kw)\n primary = weight * Lx \\\n * (1. + z)**3 / pop.pf['pop_Ex'] / erg_per_ev\n fion = self.esec.DepositionFraction(kw['igm_e'], channel='h_1')[0]\n\n return primary * (1. + fion) * (pop.pf['pop_Ex'] - E_th[0]) \\\n / E_th[0]\n\n # Full calculation - much like computing integrated flux\n norm = J21_num * self.sigma0\n \n # Integrate over function\n if kw['fluxes'][popid] is None:\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'], \n zxavg=kw['zxavg']) * self.sigma(E, species=species) \\\n / norm / ev_per_hz\n \n ion, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0, \n lambda b: kw['Emax'], epsrel=self.rtol, epsabs=self.atol) \n \n # Integrate over set of discrete points\n else: \n integrand = self.sigma_E[species_str][popid][band] \\\n * kw['fluxes'][popid][band] / norm / ev_per_hz\n \n if self.sampled_integrator == 'romb':\n ion = romb(integrand * self.E[popid][band], \n dx=self.dlogE[popid][band])[0] * log10\n else:\n ion = simps(integrand * self.E[popid][band], \n x=self.logE[popid][band]) * log10\n \n # Re-normalize\n ion *= 4. 
* np.pi * norm\n \n # Currently a rate coefficient, returned value depends on return_rc\n if kw['return_rc']:\n pass\n else:\n ion *= self.coefficient_to_rate(z, species, **kw) \n \n return ion\n \n def SecondaryIonizationRateIGM(self, z, species=0, donor=0, popid=0, \n band=0, **kwargs):\n \"\"\"\n Compute volume averaged secondary ionization rate.\n\n Parameters\n ----------\n z : float\n redshift\n species : int\n Ionization rate of what atom?\n Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)\n donor : int\n Which atom gave the electron?\n Can be 0, 1, or 2 (HI, HeI, and HeII, respectively) \n\n ===============\n relevant kwargs\n ===============\n fluxes : np.ndarray\n Array of fluxes corresponding to photon energies in self.igm.E.\n return_rc : bool\n Return actual heating rate, or rate coefficient for heating?\n Former has units of erg s**-1 cm**-3, latter has units of \n erg s**-1 cm**-3 atom**-1. \n\n Returns\n -------\n Volume averaged ionization rate due to secondary electrons, \n in units of ionizations per second.\n\n \"\"\" \n \n pop = self.pops[popid]\n \n if self.pf['secondary_ionization'] == 0:\n return 0.0\n\n if not pop.pf['pop_ion_src_igm']:\n return 0.0 \n\n if band is not None:\n solve_rte = self.background.solve_rte[popid][band]\n else:\n solve_rte = False\n\n # Computed in IonizationRateIGM in this case\n if not solve_rte:\n return 0.0\n\n if not np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX']):\n return 0.0\n \n if ((donor or species) in [1,2]) and (not self.pf['include_He']):\n return 0.0\n\n # Grab defaults, do some patches if need be\n kw = self._fix_kwargs(**kwargs)\n\n #if self.pf['gamma_igm'] is not None:\n # return self.pf['gamma_igm'](z)\n\n species_str = species_i_to_str[species]\n donor_str = species_i_to_str[donor]\n\n if self.esec.method > 1 and solve_rte:\n\n fion_const = 1.\n if kw['igm_e'] == 0:\n fion = self.fion[species_str][popid][band][:,0]\n else:\n i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))\n if self.esec.x[i_x] > kw['igm_e']:\n i_x -= 1\n\n j = i_x + 1 \n\n fion = self.fion[species_str][popid][band][:,i_x] \\\n + (self.fion[species_str][popid][band][:,j] - self.fion[species_str][popid][:,i_x]) \\\n * (kw['igm_e'] - self.esec.x[i_x]) \\\n / (self.esec.x[j] - self.esec.x[i_x])\n elif self.esec.method > 1:\n raise ValueError('Only know how to do advanced secondary ionization with solve_rte=True')\n else:\n fion = 1.0\n fion_const = self.esec.DepositionFraction(kw['igm_e'], \n channel=species_str)[0]\n\n norm = J21_num * self.sigma0\n \n if kw['fluxes'][popid] is None: \n if self.pf['approx_He']: # assumes lower integration limit > 4 Ryd\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'], \n zxavg=kw['zxavg']) * (self.sigma(E) * (E - E_th[0]) \\\n + self.cosm.y * self.sigma(E, 1) * (E - E_th[1])) \\\n / E_th[0] / norm / ev_per_hz\n else:\n integrand = lambda E, zz: \\\n self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'], \n zxavg=kw['zxavg']) * self.sigma(E) * (E - E_th[0]) \\\n / E_th[0] / norm / ev_per_hz\n else:\n integrand = fion * self.sigma_E[donor_str][popid][band] \\\n * (self.E[popid][band] - E_th[donor])\n \n if self.pf['approx_He']:\n integrand += self.cosm.y * self.sigma_E['he_1'][popid][band] \\\n * (self.E[popid][band] - E_th[1])\n \n integrand = integrand\n integrand *= kw['fluxes'][popid][band] / E_th[species] / norm \\\n / ev_per_hz\n \n if type(integrand) == types.FunctionType:\n ion, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0, \n lambda b: 
kw['Emax'], epsrel=self.rtol, epsabs=self.atol)\n else:\n if self.sampled_integrator == 'romb':\n ion = romb(integrand * self.E[popid][band], \n dx=self.dlogE[popid][band])[0] * log10\n else:\n ion = simps(integrand * self.E[popid][band], \n x=self.logE[popid][band]) * log10 \n \n # Re-normalize\n ion *= 4. * np.pi * norm * fion_const\n \n # Currently a rate coefficient, returned value depends on return_rc\n if kw['return_rc']:\n pass\n else:\n ion *= self.coefficient_to_rate(z, species, **kw) \n \n return ion\n \n def DiffuseLymanAlphaFlux(self, z, **kwargs):\n \"\"\"\n Flux of Lyman-alpha photons induced by photo-electron collisions.\n \n \"\"\"\n \n raise NotImplemented('hey fix me')\n \n if not self.pf['secondary_lya']:\n return 0.0\n \n #return 1e-25\n \n # Grab defaults, do some patches if need be \n kw = self._fix_kwargs(**kwargs)\n \n # Compute fraction of photo-electron energy deposited as Lya excitation\n if self.esec.method > 1 and (kw['fluxes'][popid] is not None):\n if kw['igm_e'] == 0:\n flya = self.flya[:,0]\n else:\n i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))\n if self.esec.x[i_x] > kw['igm_e']:\n i_x -= 1\n \n j = i_x + 1 \n \n flya = self.flya[:,i_x] \\\n + (self.flya[:,j] - self.flya[:,i_x]) \\\n * (kw['igm_e'] - self.esec.x[i_x]) \\\n / (self.esec.x[j] - self.esec.x[i_x]) \n else:\n return 0.0\n \n # Re-normalize to help integrator\n norm = J21_num * self.sigma0\n \n # Compute integrand\n integrand = self.sigma_E[species_str] * (self.E - E_th[species])\n \n integrand *= kw['fluxes'] * flya / norm / ev_per_hz\n \n if kw['Emax'] is not None:\n imax = np.argmin(np.abs(self.E - kw['Emax']))\n if imax == 0:\n return 0.0\n \n if self.sampled_integrator == 'romb':\n raise ValueError(\"Romberg's method cannot be used for integrating subintervals.\")\n heat = romb(integrand[0:imax] * self.E[0:imax], dx=self.dlogE[0:imax])[0] * log10\n else:\n heat = simps(integrand[0:imax] * self.E[0:imax], x=self.logE[0:imax]) * log10\n \n else:\n imin = np.argmin(np.abs(self.E - self.pop.pf['source_Emin']))\n \n if self.sampled_integrator == 'romb':\n heat = romb(integrand[imin:] * self.E[imin:], \n dx=self.dlogE[imin:])[0] * log10\n elif self.sampled_integrator == 'trapz':\n heat = np.trapz(integrand[imin:] * self.E[imin:], \n x=self.logE[imin:]) * log10\n else:\n heat = simps(integrand[imin:] * self.E[imin:], \n x=self.logE[imin:]) * log10\n \n # Re-normalize, get rid of per steradian units\n heat *= 4. * np.pi * norm * erg_per_ev\n\n # Currently a rate coefficient, returned value depends on return_rc \n if kw['return_rc']:\n pass\n else:\n heat *= self.coefficient_to_rate(z, species, **kw)\n\n return heat\n \n", "\"\"\"\n\nMetaGalacticBackground.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Mon Feb 16 12:43:06 MST 2015\n\nDescription: \n\n\"\"\"\n\nimport numpy as np\nfrom ..util import ParameterFile\nfrom scipy.interpolate import interp1d\nfrom ..solvers import UniformBackground\nfrom ..util.ReadData import _sort_history, flatten_energies, flatten_flux\n\nclass MetaGalacticBackground(UniformBackground):\n def __init__(self, grid=None, **kwargs):\n \"\"\"\n Initialize a MetaGalacticBackground object. \n \"\"\"\n\n self._is_thru_run = False\n \n UniformBackground.__init__(self, grid=grid, **kwargs)\n \n def run(self):\n \"\"\"\n Evolve radiation background in time.\n\n .. 
note:: Assumes we're using the generator, otherwise the time \n evolution must be controlled manually.\n\n Returns\n -------\n Nothing: sets `history` attribute containing the entire evolution\n of the background for each population.\n\n \"\"\"\n\n self._is_thru_run = True\n\n all_z = [] # sometimes not deterministic\n all_fluxes = []\n for (z, fluxes) in self.step():\n all_z.append(z)\n all_fluxes.append(fluxes)\n\n # At this stage, redshift is in descending order\n self.all_z = all_z\n self.all_fluxes = all_fluxes\n\n self._history = _sort_history(all_fluxes)\n\n def _init_stepping(self):\n \"\"\"\n Initialize lists which bracket radiation background fluxes.\n \n The structure of these lists is as follows:\n (1) Each list contains one element per source population.\n (2) If that population will approximate the RTE, this entry will be \n None.\n (3) The redshift lists, _zlo and _zhi, will just be a sequences of \n floats. \n (4) The flux entires, if not None, will be lists, since in general an\n emission band can be broken up into several pieces. In this case,\n the number of entries (for each source population) will be equal\n to the number of bands, which you can find in self.bands_by_pop.\n \n Sets\n ----\n Several attributes:\n (1) _zhi, _zlo\n (2) _fhi, _flo\n \n \"\"\"\n \n # For \"smart\" time-stepping\n self._zhi = []; self._zlo = []\n self._fhi = []; self._flo = []\n \n # Looping over populations.\n z_by_pop = []\n for i, generator in enumerate(self.generators):\n\n # Recall that each generator may actually be a list of generators,\n # one for each (sub-)band.\n \n if (generator == [None]) or (generator is None):\n self._zhi.append(None)\n self._zlo.append(None)\n self._fhi.append(None)\n self._flo.append(None)\n continue\n\n # Only make it here when real RT is happenin'\n\n # Setup arrays (or lists) for flux solutions\n _fhi = []\n _flo = []\n for j, gen in enumerate(generator):\n if gen.__name__ == '_flux_generator_generic':\n _fhi.append(np.zeros_like(self.energies[i][j]))\n _flo.append(np.zeros_like(self.energies[i][j]))\n continue\n\n # Otherwise, there are sub-bands (i.e., sawtooth)\n _fhi.append(np.zeros_like(np.concatenate(self.energies[i][j])))\n _flo.append(np.zeros_like(np.concatenate(self.energies[i][j])))\n\n # Loop over sub-bands and retrieve fluxes\n for j, gen in enumerate(generator):\n\n # Tap generator, grab fluxes\n zhi, flux = gen.next()\n\n # Increment the flux\n _fhi[j] += flux.copy()\n \n # Tap generator, grab fluxes (again)\n zlo, flux = gen.next()\n \n # Increment the flux (again)\n _flo[j] += flux.copy()\n\n # Save fluxes for this population\n self._zhi.append([zhi for k in range(len(generator))])\n self._zlo.append([zlo for k in range(len(generator))])\n \n self._fhi.append(_fhi)\n self._flo.append(_flo)\n \n z_by_pop.append(zlo)\n \n # Set the redshift based on whichever population took the smallest\n # step. Other populations will interpolate to find flux.\n self.update_redshift(max(z_by_pop))\n \n def step(self):\n \"\"\"\n Initialize generator for the meta-galactic radiation background.\n \n ..note:: This can run asynchronously with a MultiPhaseMedium object.\n\n Returns\n -------\n Generator for the background radiation field. 
Yields the flux for \n each population.\n\n \"\"\"\n\n t = 0.0\n z = self.pf['initial_redshift']\n zf = self.pf['final_redshift']\n \n # Start the generator\n while z > zf: \n z, fluxes = self.update_fluxes() \n \n yield z, fluxes\n\n def update_redshift(self, z):\n self.z = z\n\n @property\n def history(self):\n if hasattr(self, '_history'):\n pass\n elif hasattr(self, 'all_fluxes'):\n self._history = _sort_history(self.all_fluxes)\n else:\n raise NotImplemented('help!')\n \n return self._history\n \n def update_fluxes(self):\n \"\"\"\n Loop over flux generators and retrieve the next values.\n \n ..note:: Populations need not have identical redshift sampling.\n \n Returns\n -------\n Current redshift and dictionary of fluxes. Each element of the flux\n dictionary corresponds to a single population, and within that, there\n are separate lists for each sub-band over which we solve the RTE.\n \n \"\"\"\n \n if (not self._is_thru_run) and (not self.approx_all_pops) and \\\n not hasattr(self, '_fhi'):\n \n self._init_stepping()\n \n # Save fluxes by pop as simulations run\n self.all_z = []\n self.all_fluxes = []\n \n z_by_pop = [None for i in range(self.Npops)]\n \n fluxes = {}\n for i, pop_generator in enumerate(self.generators):\n \n # Skip approximate (or non-contributing) backgrounds\n if pop_generator is None:\n fluxes[i] = None\n continue\n \n fluxes_by_band = []\n\n # For each population, the band is broken up into pieces\n for j, generator in enumerate(pop_generator):\n \n # Those pieces might have a sawtooth component!\n \n # If not being run as part of another simulation, there are \n # no external time-stepping constraints, so just poke the \n # generator and move on\n if self._is_thru_run:\n z, f = generator.next()\n z_by_pop[i] = z\n fluxes_by_band.append(f)\n continue\n \n # Otherwise, we potentially need to sub-cycle the background.\n # This may happen if (1) the time-step is being regulated\n # from the simulation in which this background is embedded \n # (i.e., epsilon_dt requires smaller timestep than redshift\n # step allowed by this population) or (2) if other populations\n # have a different requirement for the redshift sampling, \n # such that this population must interpolate between its\n # (larger) redshift steps while other populations churn away.\n\n # For redshifts before this background turns on...\n # (this should only happen once)\n if self.z > self._zhi[i][j]:\n if generator.__name__ == '_flux_generator_generic':\n z, f = self.z, np.zeros_like(self.energies[i][j])\n else:\n z = self.z \n f = np.zeros_like(flatten_energies(self.energies[i][j]))\n\n fluxes_by_band.append(f)\n continue\n\n # If we've surpassed the lower redshift bound, poke the \n # generator\n elif self.z <= self._zlo[i][j]:\n\n self._zhi[i][j] = self._zlo[i][j]\n self._fhi[i][j] = self._flo[i][j]\n z, f = generator.next()\n \n # Sometimes the generator's redshift sampling will be finer\n # than needed by e.g., a MultiPhaseMedium, so we cycle\n # multiple times before exiting.\n while z > self.z:\n self._zhi[i][j] = self._zlo[i][j]\n self._fhi[i][j] = self._flo[i][j]\n \n z, f = generator.next()\n \n self._zlo[i][j] = z\n self._flo[i][j] = f\n else:\n z = self.z\n\n # If zlo < z <= self.zhi, we'll interpolate\n\n # If we're between redshift steps, interpolate to find the \n # background flux\n if self.z == self._zhi[i][j]:\n f = self._fhi[i][j]\n elif self.z > self._zlo[i][j]:\n \n z = self.z\n\n interp = interp1d([self._zlo[i][j], self._zhi[i][j]], \n [self._flo[i][j], self._fhi[i][j]], \n axis=0, 
assume_sorted=True, kind='linear') \n \n f = interp(z)\n\n elif self.z == self._zlo[i][j]:\n f = self._flo[i][j]\n\n fluxes_by_band.append(f)\n \n if not self._is_thru_run: \n z_by_pop[i] = max(self._zlo[i]) \n \n fluxes[i] = fluxes_by_band\n\n # Set the redshift based on whichever population took the smallest\n # step. Other populations will interpolate to find flux.\n znext = max(z_by_pop)\n \n if (not self._is_thru_run):\n self.all_z.append(z_by_pop)\n self.all_fluxes.append(fluxes)\n \n # If being externally controlled, we can't tamper with the redshift!\n if self._is_thru_run:\n self.update_redshift(znext)\n\n return znext, fluxes\n\n def update_rate_coefficients(self, z, **kwargs):\n \"\"\"\n Compute ionization and heating rate coefficients.\n\n Parameters\n ----------\n z : float\n Current redshift.\n\n Returns\n -------\n Dictionary of rate coefficients.\n\n \"\"\"\n \n # Must compute rate coefficients from fluxes \n if self.approx_all_pops:\n kwargs['fluxes'] = [None] * self.Npops\n else: \n z, fluxes = self.update_fluxes()\n kwargs['fluxes'] = fluxes\n \n # Run update_rate_coefficients within MultiPhaseMedium\n return super(MetaGalacticBackground, self).update_rate_coefficients(z, \n **kwargs)\n \n def get_integrated_flux(self, band, popid=0):\n \"\"\"\n Return integrated flux in supplied (Emin, Emax) band at all redshifts.\n \"\"\"\n \n zarr, Earr, flux = self.get_history(popid, True, True)\n \n i1 = np.argmin(np.abs(Earr - band[0]))\n i2 = np.argmin(np.abs(Earr - band[1]))\n \n return zarr, np.trapz(flux[:,i1:i2], x=Earr[i1:i2], axis=1)\n\n def get_history(self, popid=0, flatten=False, uniquify=True):\n \"\"\"\n Grab data associated with a single population.\n\n Parameters\n ----------\n popid : int\n ID number for population of interest.\n flatten : bool\n For sawtooth calculations, the energies are broken apart into \n different bands which have different sizes. Set this to true if\n you just want a single array, rather than having the energies\n and fluxes broken apart by their band.\n\n Returns\n -------\n Tuple containing the redshifts, energies, and fluxes for the given\n population, in that order.\n \n if flatten == True:\n The energy array is 1-D.\n The flux array will have shape (z, E)\n else:\n The energies are stored as a list. The number of elements will\n be determined by how many sub-bands there are. Each element will\n be a list or an array, depending on whether or not there is a \n sawtooth component to that particular background.\n \n \"\"\"\n \n hist = self.history\n \n # First, get redshifts. If not run \"thru run\", then they will\n # be in descending order so flip 'em.\n if self._is_thru_run:\n z = self.redshifts[popid]\n else:\n # This may change on the fly due to sub-cycling and such\n z = np.array(self.all_z).T[popid][-1::-1]\n\n if flatten:\n E = flatten_energies(self.energies[popid])\n\n f = np.zeros([len(z), E.size])\n for i, flux in enumerate(hist[popid]):\n fzflat = []\n for j in range(len(self.energies[popid])):\n fzflat.extend(flux[j])\n\n f[i] = np.array(fzflat)\n \n # \"tr\" = \"to return\"\n z_tr = z\n E_tr = E\n f_tr = np.array(f)[-1::-1,:]\n else:\n z_tr = z\n E_tr = self.energies[popid]\n f_tr = hist[popid][-1::-1,:]\n \n # We've flipped the fluxes too since they are inherently in \n # order of descending redshift. \n \n if uniquify:\n z_uni, indi = np.unique(z_tr, return_index=True)\n return z_uni, E_tr, f_tr[indi,:]\n else: \n return z_tr, E_tr, f_tr\n\n " ]
[ [ "numpy.array", "numpy.zeros", "numpy.ones", "numpy.diff", "numpy.arange", "numpy.cumsum", "numpy.log10", "numpy.linspace" ], [ "scipy.integrate.romb", "numpy.array", "scipy.integrate.simps", "numpy.zeros", "numpy.log", "numpy.diff", "numpy.any", "numpy.allclose", "numpy.trapz", "numpy.abs", "scipy.integrate.dblquad", "numpy.log10" ], [ "numpy.concatenate", "numpy.zeros_like", "numpy.array", "scipy.interpolate.interp1d", "numpy.trapz", "numpy.abs", "numpy.unique" ] ]
yfukai/exputils
[ "aab7bb69d12887f069e6768144dc767ea82e6306" ]
[ "lib/exputils/plotutils/__init__.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib import ticker\nfrom . import cm\n\n#https://stackoverflow.com/questions/31940285/plot-a-polar-color-wheel-based-on-a-colormap-using-python-matplotlib\ndef color_wheel(cmap,fig=plt.figure(),figsize=(4,4)):\n #Generate a figure with a polar projection\n fg = plt.figure(figsize=figsize)\n ax = fg.add_axes([0.1,0.1,0.8,0.8], projection='polar')\n\n #define colormap normalization for 0 to 2*pi\n norm = mpl.colors.Normalize(0, 2*np.pi) \n\n #Plot a color mesh on the polar plot\n #with the color set by the angle\n\n n = 200 #the number of secants for the mesh\n t = np.linspace(0,2*np.pi,n) #theta values\n r = np.linspace(0,1,2) #raidus values change 0.6 to 0 for full circle\n rg, tg = np.meshgrid(r,t) #create a r,theta meshgrid\n c = tg #define color values as theta value\n im = ax.pcolormesh(t, r, c.T,norm=norm,cmap=cmap) #plot the colormesh on axis with colormap\n ax.set_yticklabels([]) #turn of radial tick labels (yticks)\n ax.tick_params(pad=15,labelsize=24) #cosmetic changes to tick labels\n ax.spines['polar'].set_visible(False) #turn off the axis spine.\n \ndef legend_reverse(ax=None,**kwargs):\n if ax is None: ax=plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles[::-1], labels[::-1],**kwargs)\ndef errorbar_arg_to_plot_arg(args):\n args_plot=args.copy()\n fmt=args_plot.pop(\"fmt\",\".\")\n args_plot.pop(\"capsize\",None)\n args_plot.pop(\"ecolor\",None)\n args_plot.pop(\"capthick\",None)\n return fmt, args_plot\ndef errorbar_limited(err_indices,x,y,yerr=None,xerr=None,ax=None,last_params={},**args):\n indices=np.argsort(x)\n x=x[indices]\n y=y[indices]\n if ax is None: ax=plt.gca()\n wo_err_indices=np.setdiff1d(np.arange(len(x)),err_indices)\n fmt,args_plot=errorbar_arg_to_plot_arg(args)\n args_plot.pop(\"label\",None)\n ax.plot(x[wo_err_indices],y[wo_err_indices],fmt,**args_plot)\n yerr2=None if yerr is None else yerr[err_indices]\n xerr2=None if xerr is None else xerr[err_indices]\n args.update({\"zorder\":args_plot.get(\"zorder\",3)+2})\n args.update(last_params)\n ax.errorbar(x[err_indices],y[err_indices],yerr2,xerr2,**args)\n\ndef get_all_data(ax=None):\n if not ax:\n ax=plt.gca()\n if len(ax.lines)>0:\n xss,yss=zip(*[l.get_data() for l in ax.lines])\n return xss,yss\n else:\n return None\n\ndef get_data_lim(ax=None,xlims=(-np.inf,np.inf),ylims=(-np.inf,np.inf)):\n if ax is None: ax=plt.gca()\n data=[np.concatenate(datum) for datum in get_all_data(ax)] #all xs, ys\n data=[datum[np.logical_and(vmin<datum,datum<vmax)] \n for datum,vmin,vmax in zip(data,*zip(xlims,ylims))]\n return [(np.min(datum),np.max(datum)) for datum in data]\n\ndef calc_lim(vmin,vmax,margin,islog=False):\n if islog:\n vr=vmax/vmin\n if vr>0:\n vm=np.exp(np.log(vr)*margin)\n vmin=vmin/vm ; vmax=vmax*vm\n else:\n vr=vmax-vmin\n vm=vr*margin\n vmin=vmin-vm ; vmax=vmax+vm\n return vmin,vmax\n\ndef fit_data_lim(ax=None,which=\"both\",\n margin=0,xlog=True,ylog=True,\n xlims=[-np.inf,np.inf],ylims=[-np.inf,np.inf]):\n if ax is None: ax=plt.gca()\n if xlog and xlims[0]<0: xlims[0]=0\n if ylog and ylims[0]<0: ylims[0]=0\n limss=get_data_lim(ax,xlims,ylims)\n xlim,ylim=[calc_lim(*lims,margin,islog) \n for lims,islog in zip(limss,(xlog,ylog))]\n if which==\"both\" or which==\"x\":\n ax.set_xlim(xlim)\n if which==\"both\" or which==\"y\":\n ax.set_ylim(ylim)\n\ndef set_log_minor(ax=None,which=\"both\",subs=(2,5)):\n if ax is None: ax=plt.gca()\n if which in (\"both\",\"x\"):\n 
ax.xaxis.set_minor_locator(ticker.LogLocator(subs=subs))\n ax.xaxis.set_minor_formatter(ticker.LogFormatter(labelOnlyBase=False))\n if which in (\"both\",\"y\"):\n ax.yaxis.set_minor_locator(ticker.LogLocator(subs=subs))\n ax.yaxis.set_minor_formatter(ticker.LogFormatter(labelOnlyBase=False))\n# else:\n# raise ValueError(\"which parameter must be both, x, or y\")\n\ndef plot_guideline(b,e,slope,label=\"\",style=\"-b\",left=False,ha=\"left\",va=\"bottom\",fontsize=10,plotargs={},textargs={},ax=None):\n if ax is None: ax=plt.gca()\n if len(b) == 2 and len(e) == 1:\n bx = b[0]\n by = b[1]\n ex = e[0]\n ey = by+((ex-bx)*slope)\n elif len(b) == 1 and len(e) == 2:\n bx = b[0]\n ex = e[0]\n ey = e[1]\n by = ey-((ex-bx)*slope)\n ax.plot([bx,ex],[by,ey],style,**plotargs)\n x = bx if left else ex\n y = by if left else ey\n ax.text(x,y,label,ha=ha,va=va,fontsize=fontsize,**textargs)\n\ndef plot_guideline_log(b,e,exponent,label=\"\",style=\"-b\",left=False,ha=\"left\",va=\"bottom\",\n fontsize=10,plotargs={},textargs={},ax=None,xoffset=0,yoffset=0):\n if ax is None: ax=plt.gca()\n if len(b) == 2 and len(e) == 1:\n bx = b[0]\n by = b[1]\n ex = e[0]\n ey = by*((ex/bx)**exponent)\n elif len(b) == 1 and len(e) == 2:\n bx = b[0]\n ex = e[0]\n ey = e[1]\n by = ey/((ex/bx)**exponent)\n ax.loglog([bx,ex],[by,ey],style,**plotargs)\n x = (bx if left else ex)+xoffset\n y = (by if left else ey)+yoffset\n ax.text(x,y,label,ha=ha,va=va,fontsize=fontsize,**textargs)\n\ndef plot_horizontal_line(y,label=\"\",linestyle=\"--\",color=\"k\",left=False,ha=\"left\",va=\"center\",fontsize=10,xoffset=0,yoffset=0,plotargs={},textargs={},ax=None):\n if ax is None: ax=plt.gca()\n ax.axhline(y,linestyle=linestyle,color=color,**plotargs)\n xlims=ax.get_xlim()\n x=xlims[0] if left else xlims[1]\n ax.text(x+xoffset,y+yoffset,label,horizontalalignment=ha,va=va,fontsize=fontsize,**textargs)\ndef imshow_color(img1,img2,img3,ax=None,*args,**kargs):\n if ax is None: ax=plt.gca()\n im = np.transpose([img1,img2,img3],(1,2,0))\n kargs.update({\"interpolation\":\"none\"})\n ax.imshow(im,*args,**kargs)\n\ndef set_str_formatters(fmt,ax=None,which=\"both\"):\n if ax is None: ax=plt.gca()\n if which==\"both\" or which==\"x\":\n ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(fmt))\n ax.xaxis.set_minor_formatter(ticker.FormatStrFormatter(fmt))\n if which==\"both\" or which==\"y\":\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter(fmt))\n ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter(fmt))\n\ndef hide_tick_label(ax=None,which=\"both\"):\n if ax is None: ax=plt.gca()\n if which==\"both\" or which==\"x\":\n plt.setp(ax.get_xmajorticklabels(), visible=False)\n plt.setp(ax.get_xminorticklabels(), visible=False)\n if which==\"both\" or which==\"y\":\n plt.setp(ax.get_ymajorticklabels(), visible=False)\n plt.setp(ax.get_yminorticklabels(), visible=False)\n" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.log", "matplotlib.pyplot.gca", "numpy.min", "matplotlib.ticker.LogLocator", "matplotlib.pyplot.figure", "matplotlib.ticker.FormatStrFormatter", "numpy.logical_and", "matplotlib.ticker.LogFormatter", "numpy.transpose", "matplotlib.colors.Normalize", "numpy.argsort", "numpy.linspace", "numpy.meshgrid" ] ]
gujralsanyam22/pyrobot
[ "a0448714857b684d8b280f710e9304988524d2e0" ]
[ "src/pyrobot/vrep_locobot/camera.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport pyrobot.utils.util as prutil\nfrom pyrobot.core import Camera\n\nfrom pyrobot.utils.util import try_cv2_import\n\ncv2 = try_cv2_import()\n\nfrom cv_bridge import CvBridge, CvBridgeError\n\n\nfrom pyrep.objects.vision_sensor import VisionSensor\nfrom pyrep.const import ObjectType, PerspectiveMode, RenderMode\nfrom pyrep.objects.joint import Joint\n\n\nclass LoCoBotCamera(Camera):\n \"\"\"docstring for SimpleCamera\"\"\"\n\n def __init__(self, configs, simulator):\n\n self.sim = simulator.sim\n self.rgb_cam = VisionSensor(\"kinect_rgb\")\n self.depth_cam = VisionSensor(\"kinect_depth\")\n self.rgb_cam.set_render_mode(RenderMode.OPENGL3)\n self.depth_cam.set_render_mode(RenderMode.OPENGL3)\n\n # Pan and tilt related variables.\n self.pan_joint = Joint(\"LoCoBot_head_pan_joint\")\n self.tilt_joint = Joint(\"LoCoBot_head_tilt_joint\")\n\n def get_rgb(self):\n\n return self.rgb_cam.capture_rgb()\n\n def get_depth(self):\n\n return self.depth_cam.capture_depth()\n\n def get_rgb_depth(self):\n\n return self.get_rgb(), self.get_depth()\n\n def get_intrinsics(self):\n\n # Todo: Remove this after we fix intrinsics\n raise NotImplementedError\n \"\"\"\n\t\tReturns the instrinsic matrix of the camera\n\n\t\t:return: the intrinsic matrix (shape: :math:`[3, 3]`)\n\t\t:rtype: np.ndarray\n\t\t\"\"\"\n # fx = self.configs['Camera.fx']\n # fy = self.configs['Camera.fy']\n # cx = self.configs['Camera.cx']\n # cy = self.configs['Camera.cy']\n Itc = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])\n return Itc\n\n def pix_to_3dpt(self, rs, cs, in_cam=False):\n \"\"\"\n\t\tGet the 3D points of the pixels in RGB images.\n\n\t\t:param rs: rows of interest in the RGB image.\n\t\t It can be a list or 1D numpy array\n\t\t which contains the row indices.\n\t\t The default value is None,\n\t\t which means all rows.\n\t\t:param cs: columns of interest in the RGB image.\n\t\t It can be a list or 1D numpy array\n\t\t which contains the column indices.\n\t\t The default value is None,\n\t\t which means all columns.\n\t\t:param in_cam: return points in camera frame,\n\t\t otherwise, return points in base frame\n\n\t\t:type rs: list or np.ndarray\n\t\t:type cs: list or np.ndarray\n\t\t:type in_cam: bool\n\n\t\t:returns: tuple (pts, colors)\n\n\t\t pts: point coordinates in world frame\n\t\t (shape: :math:`[N, 3]`)\n\n\t\t colors: rgb values for pts_in_cam\n\t\t (shape: :math:`[N, 3]`)\n\n\t\t:rtype: tuple(np.ndarray, np.ndarray)\n\t\t\"\"\"\n\n raise NotImplementedError\n\n def get_current_pcd(self, in_cam=True):\n \"\"\"\n\t\tReturn the point cloud at current time step (one frame only)\n\n\t\t:param in_cam: return points in camera frame,\n\t\t otherwise, return points in base frame\n\n\t\t:type in_cam: bool\n\t\t:returns: tuple (pts, colors)\n\n\t\t pts: point coordinates in world frame (shape: :math:`[N, 3]`)\n\n\t\t colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)\n\t\t:rtype: tuple(np.ndarray, np.ndarray)\n\t\t\"\"\"\n\n raise NotImplementedError\n\n @property\n def state(self):\n \"\"\"\n\t\tReturn the current pan and tilt joint angles of the robot camera.\n\n\t\t:return:\n\t\t pan_tilt: A list the form [pan angle, tilt angle]\n\t\t:rtype: list\n\t\t\"\"\"\n return self.get_state()\n\n def get_state(self):\n \"\"\"\n\t\tReturn the current pan and tilt joint angles of the robot 
camera.\n\n\t\t:return:\n\t\t pan_tilt: A list the form [pan angle, tilt angle]\n\t\t:rtype: list\n\t\t\"\"\"\n return [self.get_pan(), self.get_tilt()]\n\n def get_pan(self):\n \"\"\"\n\t\tReturn the current pan joint angle of the robot camera.\n\n\t\t:return:\n\t\t pan: Pan joint angle\n\t\t:rtype: float\n\t\t\"\"\"\n return self.pan_joint.get_joint_position()\n\n def get_tilt(self):\n \"\"\"\n\t\tReturn the current tilt joint angle of the robot camera.\n\n\t\t:return:\n\t\t tilt: Tilt joint angle\n\t\t:rtype: float\n\t\t\"\"\"\n return self.tilt_joint.get_joint_position()\n\n def set_pan(self, pan, wait=True):\n \"\"\"\n\t\tSets the pan joint angle to the specified value.\n\n\t\t:param pan: value to be set for pan joint\n\t\t:param wait: wait until the pan angle is set to\n\t\t the target angle.\n\n\t\t:type pan: float\n\t\t:type wait: bool\n\t\t\"\"\"\n\n self.pan_joint.set_joint_position(pan)\n # [self.sim.step() for _ in range(50)]\n\n def set_tilt(self, tilt, wait=True):\n \"\"\"\n\t\tSets the tilt joint angle to the specified value.\n\n\t\t:param tilt: value to be set for the tilt joint\n\t\t:param wait: wait until the tilt angle is set to\n\t\t the target angle.\n\n\t\t:type tilt: float\n\t\t:type wait: bool\n\t\t\"\"\"\n\n self.tilt_joint.set_joint_position(tilt)\n\n def set_pan_tilt(self, pan, tilt, wait=True):\n \"\"\"\n\t\tSets both the pan and tilt joint angles to the specified values.\n\n\t\t:param pan: value to be set for pan joint\n\t\t:param tilt: value to be set for the tilt joint\n\t\t:param wait: wait until the pan and tilt angles are set to\n\t\t the target angles.\n\n\t\t:type pan: float\n\t\t:type tilt: float\n\t\t:type wait: bool\n\t\t\"\"\"\n\n self.set_pan(pan)\n self.set_tilt(tilt)\n\n def reset(self):\n \"\"\"\n\t\tThis function resets the pan and tilt joints by actuating\n\t\tthem to their home configuration.\n\t\t\"\"\"\n self.set_pan_tilt(self.configs.CAMERA.RESET_PAN, self.configs.CAMERA.RESET_TILT)\n" ]
[ [ "numpy.array" ] ]
DPBayes/data-sharing-examples
[ "f9fffc5b8f45d8dd7b93cb7e812439decfa51193" ]
[ "adult/dp_logistic_regression_onehot/classify_anticipated.py" ]
[ "import pickle, torch\nimport numpy as np\nimport pandas as pd\n\ntarget_epsilons = [1.1, 2.0, 4.0, 8.0, 14.0]\nanticipated_Ts = [2, 5, 10, 20]\nmodels_dict = {}\nfor eps in target_epsilons:\n\tmodels_dict[eps] = pickle.load(open('./res/models_2019-11-05_{}.p'.format(eps), 'rb'))\n\n\nX_test = pd.read_csv('./onehotted_data/encoded_X_test.csv', sep=';')\ny_test = pd.read_csv('./onehotted_data/encoded_y_test.csv', sep=';', header=None).values.squeeze()\n\nfeature_names = list(X_test.columns)\nX_test['Intercept'] = np.ones(len(X_test))\nX_test = X_test[['Intercept'] + feature_names]\n\naccs_dict={}\nfor eps in target_epsilons:\n\tmodels = models_dict[eps]\n\taccs = np.zeros(40)\n\tfor i, model in enumerate(models):\n\t\tw_map = model.reparam.bias.data.numpy()\n\t\tS_N = model.reparam.weight.exp().data.numpy()**2\n\t\tmu_a = X_test.dot(w_map) \n\t\tsigma_a2 = (X_test**2).dot(S_N)\n\t\tkappa = (1+np.pi*sigma_a2/8)**-0.5\n\t\tsigmoid = lambda x : (1+np.exp(-x))**-1\n\t\ty_pred = 1*(sigmoid(kappa*mu_a)>0.5)\n\t\taccs[i] = np.mean(y_pred==y_test)\n\taccs = np.array(np.split(accs, 10))\n\t## accs \\in R^{10 x 4}, column corresponds to a anticipated runs\n\taccs_dict[eps]=accs\n\nmean_accs_dict = {eps : accs_dict[eps].mean(0) for eps in target_epsilons}\nstd_accs_dict = {eps : accs_dict[eps].std(0) for eps in target_epsilons}\n\npickle.dump({'means': mean_accs_dict, 'stds': std_accs_dict},\\\n\t\topen('../plot_scripts/plot_pickles/anticipated_res_onehot.p', 'wb'))\n" ]
[ [ "numpy.zeros", "numpy.exp", "numpy.split", "numpy.mean", "pandas.read_csv" ] ]
aldajo92/UDACITY-SDC_BehavioralCloning
[ "c2119a1bd244d7a4a1da37209e8c6174c9273628", "c2119a1bd244d7a4a1da37209e8c6174c9273628" ]
[ "read_and_train_6.py", "read_and_train_2.py" ]
[ "import csv\nimport cv2\nimport numpy as np\n\n# dataPath: folder path where all IMG's and driving_log's are stored\ndataPath = 'data'\ndriving_log_list = {'driving_log.csv':'IMG', 'driving_log2.csv':'IMG2'}\n\ncorrection = 0.5 # this is a parameter to tune\n\ndef get_image_from_sourcepath(source_path, folder):\n filename = source_path.split('/')[-1]\n current_path = './{}/{}/{}'.format(dataPath,folder,filename)\n image = cv2.imread(current_path)\n return image\n\n# filename: String path asociated with the specific csv file that contains the relation between images an values (driving_log).\n# local_lines : list of all rows in the csv file. Each row have information about the image paths and values as an inner list.\ndef read_lines_from_filename(filename):\n local_lines = []\n with open('./{}/{}'.format(dataPath, filename)) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n local_lines.append(line)\n return local_lines\n\n# images: global list that contains all the images used to train the model as the input\n# measurements: global list that contains all measurements used to train the model as the output\nimages = []\nmeasurements = []\n\n# lines: list that contains each row of the csv file\n# line: row that contains the image path for images, and also the steering and throttle values associated, as a list.\n# images: global array that contains all the images used to train the model as the input\n# measurements: global array that contains all measurements used to train the model as the output\n# correction: a parameter that needs to be tuned. It provides a correction in the scenario when the car sees the lane lines.\nprint('Reading from: ./{}/'.format(dataPath))\nfor (d_log, folder) in driving_log_list.items():\n print('Reading file: {}'.format(d_log))\n lines = read_lines_from_filename(d_log)\n\n for line in lines:\n steering_center = float(line[3])\n steering_left = steering_center + correction\n steering_right = steering_center - correction\n\n image_center = get_image_from_sourcepath(line[0], folder)\n image_left = get_image_from_sourcepath(line[1], folder)\n image_right = get_image_from_sourcepath(line[2], folder)\n \n images.extend([image_center, image_left, image_right])\n measurements.extend([steering_center, steering_left, steering_right])\n\naugmented_images, augmented_measurements = [], []\nfor image, measurement in zip(images, measurements):\n augmented_images.append(image)\n augmented_measurements.append(measurement)\n augmented_images.append(cv2.flip(image,1))\n augmented_measurements.append(measurement*-1.0)\n\nX_train = np.array(augmented_images)\nY_train = np.array(augmented_measurements)\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Conv2D, MaxPool2D, Cropping2D\nfrom keras.layers.convolutional import Convolution2D\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\nmodel.add(Conv2D(filters=6, kernel_size=(5, 5), activation='relu'))\nmodel.add(MaxPool2D())\nmodel.add(Conv2D(filters=6, kernel_size=(5, 5), activation='relu'))\nmodel.add(MaxPool2D())\nmodel.add(Flatten())\nmodel.add(Dense(120))\nmodel.add(Dense(84))\nmodel.add(Dense(1))\n\nmodel.compile(loss = 'mse', optimizer = 'adam')\nmodel.fit(X_train, Y_train, validation_split = 0.2, shuffle = True, nb_epoch=4)\n\nmodel.save('model.h5')", "import csv\nimport cv2\nimport numpy as np\n\n# dataPath: folder path where all IMG's and driving_log's are stored\ndataPath = 
'train/'\n\nlines = []\nprint('Reading from: ./{}'.format(dataPath))\nwith open('./{}driving_log.csv'.format(dataPath)) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\n# images: global list that contains all the images used to train the model as the input\n# measurements: global list that contains all measurements used to train the model as the output\nimages = []\nmeasurements = []\n\n# lines: list that contains each row of the csv file\n# line: row that contains the image path for images, and also the steering and throttle values associated, as a list.\nfor line in lines:\n source_path = line[0]\n filename = source_path.split('/')[-1]\n current_path = './{}IMG/{}'.format(dataPath,filename)\n image = cv2.imread(current_path)\n images.append(image)\n measurement = float(line[3])\n measurements.append(measurement)\n\nX_train = np.array(images)\nY_train = np.array(measurements)\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\nmodel.compile(loss = 'mse', optimizer = 'adam')\nmodel.fit(X_train, Y_train, validation_split = 0.2, shuffle = True, nb_epoch=2)\n\nmodel.save('model.h5')" ]
[ [ "numpy.array" ], [ "numpy.array" ] ]
qcc4cp/qcc
[ "63227bbe36251b6f0bb3f78f2233337edcef547e" ]
[ "src/subset_sum.py" ]
[ "# python3\n\"\"\"Example: Number set partitioning such set sum(A) == sum(B).\"\"\"\n\n\n# Based on this paper:\n# https://cds.cern.ch/record/467590/files/0010018.pdf\n#\n# For a set A of integers, can A be partitioned into\n# two sets A1 and A2, such that:\n# sum(A1) == sum(A2)\n#\n# For this to work, sum(A) must not be odd.\n# We should reach 100% consistent results.\n\nimport random\nfrom typing import List\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\n\nfrom src.lib import helper\n\nflags.DEFINE_integer('nmax', 15, 'Maximum number')\nflags.DEFINE_integer('nnum', 6,\n 'Maximum number of set elements [1-nmax]')\nflags.DEFINE_integer('iterations', 20, 'Number of experiments')\n\n\ndef select_numbers(nmax: int, nnum: int) -> List[int]:\n \"\"\"Select nnum random, unique numbers in range 1 to nmax.\"\"\"\n\n while True:\n sample = random.sample(range(1, nmax), nnum)\n if sum(sample) % 2 == 0:\n return sample\n\n\ndef tensor_diag(n: int, num: int):\n \"\"\"Construct tensor product from diagonal matrices.\"\"\"\n\n def tensor_product(w1: float, w2: float, diag):\n return [j for i in zip([x * w1 for x in diag],\n [x * w2 for x in diag]) for j in i]\n\n diag = [1, -1] if num == 0 else [1, 1]\n for i in range(1, n):\n if i == num:\n diag = tensor_product(i, -i, diag)\n else:\n diag = tensor_product(1, 1, diag)\n return diag\n\n\ndef set_to_diagonal_h(num_list: List[int],\n nmax: int) -> np.ndarray:\n \"\"\"Construct diag(H).\"\"\"\n\n h = [0.0] * 2**nmax\n for num in num_list:\n diag = tensor_diag(nmax, num)\n for idx, val in enumerate(diag):\n h[idx] += val\n return h\n\n\ndef compute_partition(num_list: List[int]):\n \"\"\"Compute paritions that add up.\"\"\"\n\n solutions = []\n for bits in helper.bitprod(len(num_list)):\n iset = []\n oset = []\n for idx, val in enumerate(bits):\n (iset.append(num_list[idx]) if val == 0 else\n oset.append(num_list[idx]))\n if sum(iset) == sum(oset):\n solutions.append(bits)\n return solutions\n\n\ndef dump_solution(bits: List[int], num_list: List[int]):\n iset = []\n oset = []\n for idx, val in enumerate(bits):\n (iset.append(f'{num_list[idx]:d}') if val == 0 else\n oset.append(f'{num_list[idx]:d}'))\n return '+'.join(iset) + ' == ' + '+'.join(oset)\n\n\ndef run_experiment() -> None:\n \"\"\"Run an experiment, compute H, match against 0.\"\"\"\n\n nmax = flags.FLAGS.nmax\n num_list = select_numbers(nmax, flags.FLAGS.nnum)\n solutions = compute_partition(num_list)\n\n diag = set_to_diagonal_h(num_list, nmax)\n\n non_zero = np.count_nonzero(diag)\n if non_zero != 2**nmax:\n print('Solution should exist...', end='')\n if solutions:\n print(' Found Solution:',\n dump_solution(solutions[0], num_list))\n return True\n raise AssertionError('False positive found.')\n if solutions:\n raise AssertionError('False negative found.')\n return False\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n for i in range(flags.FLAGS.iterations):\n ret = run_experiment()\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "numpy.count_nonzero" ] ]
wookayin/acme
[ "71b2ab8577a118c103718f034fa62c5ad2c0fd97" ]
[ "acme/agents/jax/ppo/networks.py" ]
[ "# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PPO network definitions.\"\"\"\n\nimport dataclasses\nfrom typing import Any, Callable, Optional, Sequence\n\nfrom acme import specs\nfrom acme.agents.jax import actor_core as actor_core_lib\nfrom acme.jax import networks as networks_lib\nfrom acme.jax import utils\n\nimport haiku as hk\nimport jax.numpy as jnp\nimport numpy as np\n\nEntropyFn = Callable[[Any], jnp.ndarray]\n\n\[email protected]\nclass PPONetworks:\n \"\"\"Network and pure functions for the PPO agent.\n\n If 'network' returns tfd.Distribution, you can use make_ppo_networks() to\n create this object properly.\n If one is building this object manually, one has a freedom to make 'network'\n object return anything that is later being passed as input to\n log_prob/entropy/sample functions to perform the corresponding computations.\n \"\"\"\n network: networks_lib.FeedForwardNetwork\n log_prob: networks_lib.LogProbFn\n entropy: EntropyFn\n sample: networks_lib.SampleFn\n sample_eval: Optional[networks_lib.SampleFn] = None\n\n\ndef make_inference_fn(\n ppo_networks: PPONetworks,\n evaluation: bool = False) -> actor_core_lib.FeedForwardPolicyWithExtra:\n \"\"\"Returns a function to be used for inference by a PPO actor.\"\"\"\n\n def inference(params: networks_lib.Params, key: networks_lib.PRNGKey,\n observations: networks_lib.Observation):\n distribution, _ = ppo_networks.network.apply(params, observations)\n if evaluation and ppo_networks.sample_eval:\n actions = ppo_networks.sample_eval(distribution, key)\n else:\n actions = ppo_networks.sample(distribution, key)\n if evaluation:\n return actions, {}\n log_prob = ppo_networks.log_prob(distribution, actions)\n return actions, {'log_prob': log_prob}\n\n return inference\n\n\ndef make_networks(\n spec: specs.EnvironmentSpec, hidden_layer_sizes: Sequence[int] = (256, 256)\n) -> PPONetworks:\n if isinstance(spec.actions, specs.DiscreteArray):\n return make_discrete_networks(spec, hidden_layer_sizes)\n else:\n return make_continuous_networks(\n spec,\n policy_layer_sizes=hidden_layer_sizes,\n value_layer_sizes=hidden_layer_sizes)\n\n\ndef make_ppo_networks(network: networks_lib.FeedForwardNetwork) -> PPONetworks:\n \"\"\"Constructs a PPONetworks instance from the given FeedForwardNetwork.\n\n Args:\n network: a transformed Haiku network that takes in observations and returns\n the action distribution and value.\n\n Returns:\n A PPONetworks instance with pure functions wrapping the input network.\n \"\"\"\n return PPONetworks(\n network=network,\n log_prob=lambda distribution, action: distribution.log_prob(action),\n entropy=lambda distribution: distribution.entropy(),\n sample=lambda distribution, key: distribution.sample(seed=key),\n sample_eval=lambda distribution, key: distribution.mode())\n\n\ndef make_discrete_networks(\n environment_spec: specs.EnvironmentSpec,\n hidden_layer_sizes: Sequence[int] = (512,),\n use_conv: bool = True,\n) -> PPONetworks:\n 
\"\"\"Creates networks used by the agent for discrete action environments.\n\n Args:\n environment_spec: Environment spec used to define number of actions.\n hidden_layer_sizes: Network definition.\n use_conv: Whether to use a conv or MLP feature extractor.\n Returns:\n PPONetworks\n \"\"\"\n\n num_actions = environment_spec.actions.num_values\n\n def forward_fn(inputs):\n layers = []\n if use_conv:\n layers.extend([networks_lib.AtariTorso()])\n layers.extend([\n hk.nets.MLP(hidden_layer_sizes, activate_final=True),\n networks_lib.CategoricalValueHead(num_values=num_actions)\n ])\n policy_value_network = hk.Sequential(layers)\n return policy_value_network(inputs)\n\n forward_fn = hk.without_apply_rng(hk.transform(forward_fn))\n dummy_obs = utils.zeros_like(environment_spec.observations)\n dummy_obs = utils.add_batch_dim(dummy_obs) # Dummy 'sequence' dim.\n network = networks_lib.FeedForwardNetwork(\n lambda rng: forward_fn.init(rng, dummy_obs), forward_fn.apply)\n # Create PPONetworks to add functionality required by the agent.\n return make_ppo_networks(network)\n\n\ndef make_continuous_networks(\n environment_spec: specs.EnvironmentSpec,\n policy_layer_sizes: Sequence[int] = (64, 64),\n value_layer_sizes: Sequence[int] = (64, 64),\n) -> PPONetworks:\n \"\"\"Creates PPONetworks to be used for continuous action environments.\"\"\"\n\n # Get total number of action dimensions from action spec.\n num_dimensions = np.prod(environment_spec.actions.shape, dtype=int)\n\n def forward_fn(inputs):\n policy_network = hk.Sequential([\n utils.batch_concat,\n hk.nets.MLP(policy_layer_sizes, activate_final=True),\n # Note: we don't respect bounded action specs here and instead\n # rely on CanonicalSpecWrapper to clip actions accordingly.\n networks_lib.MultivariateNormalDiagHead(num_dimensions)\n ])\n value_network = hk.Sequential([\n utils.batch_concat,\n hk.nets.MLP(value_layer_sizes, activate_final=True),\n hk.Linear(1),\n lambda x: jnp.squeeze(x, axis=-1)\n ])\n\n action_distribution = policy_network(inputs)\n value = value_network(inputs)\n return (action_distribution, value)\n\n # Transform into pure functions.\n forward_fn = hk.without_apply_rng(hk.transform(forward_fn))\n\n dummy_obs = utils.zeros_like(environment_spec.observations)\n dummy_obs = utils.add_batch_dim(dummy_obs) # Dummy 'sequence' dim.\n network = networks_lib.FeedForwardNetwork(\n lambda rng: forward_fn.init(rng, dummy_obs), forward_fn.apply)\n # Create PPONetworks to add functionality required by the agent.\n return make_ppo_networks(network)\n" ]
[ [ "numpy.prod" ] ]
Leajian/lpp-py
[ "299860a5d5f52189bb62e50cd4b3eda8aab01553" ]
[ "lpIO.py" ]
[ "import re\nimport json\nfrom numpy import array, squeeze\n\n\ndef sanityCheck(problem):\n hasNaturalConstraints = False\n keywordPattern = re.compile('max|min|s\\.?t\\.?|subject\\s*to|with|end', re.IGNORECASE)\n keywords = re.findall(keywordPattern, problem)\n\n if re.match('max|min', keywords[0], re.IGNORECASE):\n if len(keywords) >= 2 and re.match('s\\.?t\\.?|subject\\s*to', keywords[1], re.IGNORECASE):\n\n if len(keywords) == 4 and re.match('with', keywords[2], re.IGNORECASE):\n hasNaturalConstraints = True\n \n if not re.match('end', keywords[3], re.IGNORECASE):\n raise Exception('Expression \"end\" not found, include it after you end the problem\\'s description.')\n\n if len(keywords) == 3:\n print('WARNING! Expression \"with\" not found. Assuming all constraints are non-negative.')\n\n if not re.match('end', keywords[2], re.IGNORECASE):\n raise Exception('Expression \"end\" not found, include it after you end the problem\\'s description.')\n \n else:\n raise Exception('Expression \"s.t.\" or \"st\" or \"subject to\" not found, include it after you state the objective function.')\n else:\n raise Exception('Expression \"min\" or \"max\" not found, include it at the beginning of the problem\\'s description.')\n\n return hasNaturalConstraints\n\ndef openLP(fileName, hasNaturalConstraints=False):\n \"\"\"\n Description\n Opens the file which contains the linear problem description and splits\n it into segments so it can be parsed easier.\n Input\n string fileName The file's relative name.\n Output\n A list of containing segments of the problem, splitted on keywords.\n \"\"\"\n with open(fileName, 'r') as file:\n # Just note that read() function seeks until EOF,\n # so if it's called again, it has nothing.\n problem = file.read()\n\n # Simple sanity checks to avoid future problems and\n # also check if natural constraints are given.\n hasNaturalConstraints = sanityCheck(problem)\n\n # Cut the file into segments, from one keyword to another\n # (#1 max/min, #2 st, #optional with, #3 end).\n pattern = re.compile('s\\.?\\s*t\\.?|subject\\s*to|with|end', re.IGNORECASE)\n segmentedList = pattern.split(problem)\n\n # Unless 'with' natural constraints are given indeed,\n # we must return 3 parts.\n if hasNaturalConstraints:\n return segmentedList[:3], hasNaturalConstraints\n\n # Otherwise, we return only the first 2 parts,\n # but from 2 and beyond include the part with \"end\" delimiter,\n # which might contain nothing, a new line character or more than that\n # We don't care about content past the \"end\" delimiter.\n # Any other whitespace character is managed when necessary.\n # If there is any gibberish, the corresponding extractor function\n # is responsible to figure it out.\n return segmentedList[:2], hasNaturalConstraints\n\ndef writeLP2(MinMax, c, A, Eqin, b, naturalConstraints, inputFile, outputName=''):\n \"\"\"\n Description\n Writes the linear problem to a file in a presentable form.\n Input\n MinMax problem type\n c objective function's coefficients numpy.array\n A constraints' coefficients numpy.array\n Eqin constraints' types numpy.array\n b constraints' constants numpy.array\n naturalConstraints (optional) natural constraints' types numpy.array\n inputFile input file name\n outputName (optional) output file name\n Output\n A file which describes the problem in a presentable form.\n \"\"\"\n if outputName == '':\n outputName = '(LP-2) ' + inputFile\n with open(outputName, 'w+') as output:\n \"\"\"\n if MinMax == 1:\n output.write('max\\n')\n elif MinMax == -1:\n 
output.write('min\\n')\n \"\"\"\n output.write('MinMax = ' + str(MinMax) + '\\n\\n')\n output.write('c =\\n' + str(array(c)) + '\\n\\n') # 1 x n\n output.write('A =\\n' + str(array(A)) + '\\n\\n') # m x n\n output.write('Eqin =\\n' + str(array(Eqin).reshape(len(Eqin), 1)) + '\\n\\n') # m x 1\n output.write('b =\\n' + str(array(b).reshape(len(b), 1)) + '\\n\\n') # m x 1\n output.write('naturalConstraints =\\n' + str(squeeze(array(naturalConstraints).reshape(1, len(naturalConstraints))).tolist()) + '\\n\\n') # 1 x n\n\ndef writeLP2HumanReadable(MinMax, c, A, Eqin, b, naturalConstraints, inputFile, outputName=''):\n \"\"\"\n Description\n Writes the linear problem to a file in a human readable form.\n Input\n MinMax problem type\n c objective function's coefficients numpy.array\n A constraints' coefficients numpy.array\n Eqin constraints' types numpy.array\n b constraints' constants numpy.array\n naturalConstraints (optional) natural constraints' types numpy.array\n inputFile input file name\n outputName (optional) output file name\n Output\n A file which describes the problem in a human readable form.\n \"\"\"\n if outputName == '':\n outputName = '(LP-2) ' + inputFile\n with open(outputName, 'w+') as output:\n\n if MinMax == 1:\n output.write('max\\t')\n elif MinMax == -1:\n output.write('min\\t')\n\n # Enumarate each coefficient so we can name them\n for i, coeff in enumerate(c, start=1):\n # Ignore those with 0 coefficient\n if coeff == 0:\n output.write('\\t')\n continue\n\n # Put back the plus sign, unless it's the first term\n if str(coeff)[0] != '-' and i != 1:\n coeff = '+' + str(coeff)\n\n output.write(str(coeff) +'x' + str(i) + '\\t')\n output.write('\\n')\n\n output.write('s.t.')\n\n # For each row\n for i in zip(A, Eqin, b):\n output.write('\\t')\n\n # Enumarate each coefficient so we can name them\n for j, coeff in enumerate(i[0], start=1):\n # Ignore those with 0 coefficient\n if coeff == 0.0:\n output.write('\\t')\n continue\n\n # Put back the plus sign, unless it's the first term\n if str(coeff)[0] != '-' and j != 1:\n coeff = '+' + str(coeff)\n \n # Writting each term\n output.write(str(coeff) + 'x' + str(j) + '\\t')\n \n # Mapping the signs\n signs = {'0': '= ', '1':'>=', '-1':'<='}\n \n output.write(signs[str(squeeze(i[1]))] + ' ' + str(squeeze(i[2])) + '\\n')\n\n # Mapping the signs\n signs = {'0': 'free', '1':'>= 0', '-1':'<= 0'}\n for i, constr in enumerate(naturalConstraints, start=1):\n # Writting each constraint\n \n output.write('x' + str(i) + ' ' + signs[str(squeeze(constr))])\n if i != len(naturalConstraints):\n output.write(', ')\n output.write('\\n')\n\n\"\"\"\nJSON-related\n\"\"\"\n\ndef writeLP2json(MinMax, c, A, Eqin, b, naturalConstraints, inputFile, outputName=''):\n \"\"\"\n Description\n Writes the linear problem to a file in a serializable form.\n Input\n MinMax problem type\n c objective function's coefficients numpy.array\n A constraints' coefficients numpy.array\n Eqin constraints' types numpy.array\n b constraints' constants numpy.array\n naturalConstraints (optional) natural constraints' types numpy.array\n inputFile input file name\n outputName (optional) output file name\n Output\n A file which describes the problem in a serializable form.\n \"\"\"\n if outputName == '':\n outputName = '(LP-2) ' + inputFile + '.json'\n problem = {\n 'MinMax': MinMax,\n 'c': c.tolist(),\n 'A': A.tolist(),\n 'Eqin': Eqin.tolist(),\n 'b': b.tolist(),\n 'naturalConstraints': naturalConstraints\n }\n\n with open(outputName, 'w+') as output:\n json.dump(problem, output, 
indent=1)\n\ndef loadLP2json(inputFile):\n \"\"\"\n Description\n Returns a list of all information required for the for the linear problem.\n Input\n An LP-2 file name, which contains a problem parsed and saved by this parser in JSON format.\n Output\n In a list\n As floats\n int MinMax containing constraints' coefficients\n list c containing linear problem's coefficients array and its dimensions\n list A containing constraints' coefficients array and its dimensions\n list Eqin containing constraints' inequalities array and its dimensions\n list b containing constraints' constant parts array and its dimensions\n list naturalConstraints containing natural constraints\n \"\"\"\n with open(inputFile, 'r') as f:\n problem = json.load(f)\n\n MinMax = problem['MinMax']\n c = array(problem['c'])\n A = array(problem['A'])\n Eqin = array(problem['Eqin'])\n b = array(problem['b'])\n naturalConstraints = problem['naturalConstraints']\n \n return MinMax, c, A, Eqin, b, naturalConstraints\n" ]
[ [ "numpy.array", "numpy.squeeze" ] ]