Columns (one row per repository):
repo_name: string, lengths 6 to 130
hexsha: list
file_path: list
code: list
apis: list
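In the rows shown below, file_path, code, and apis appear to line up element-for-element, with apis holding one list of fully-qualified API names per file. As a point of reference, here is a minimal, hedged sketch (plain Python, standard library only) of how one such row could be consumed; the function name, the example_row literal, and the truncated code value are illustrative assumptions, not part of any published loader for this table.

from collections import Counter

def summarize_row(row):
    """Pair each file with its extracted APIs and tally API usage."""
    api_counts = Counter()
    for path, source, api_list in zip(row["file_path"], row["code"], row["apis"]):
        api_counts.update(api_list)  # count each fully-qualified API name
        print("%s @ %s: %s (%d lines, %d APIs)" % (
            row["repo_name"], row["hexsha"][0][:8], path,
            len(source.splitlines()), len(api_list)))
    return api_counts

# Illustrative call using a truncated copy of the first row shown below.
example_row = {
    "repo_name": "karannewatia/Mycelium",
    "hexsha": ["c20deab29d97025d7623af4bbf97f79f3132b415"],
    "file_path": ["graph_scripts/identification_graph.py"],
    "code": ["import numpy as np\nimport matplotlib.pyplot as plt\n..."],
    "apis": [["matplotlib.pyplot.legend", "numpy.arange", "matplotlib.pyplot.plot"]],
}
print(summarize_row(example_row).most_common(3))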
karannewatia/Mycelium
[ "c20deab29d97025d7623af4bbf97f79f3132b415" ]
[ "graph_scripts/identification_graph.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams['pdf.fonttype'] = 42\nplt.rcParams['ps.fonttype'] = 42\n\n\nmalice_vals = [0.005, 0.01, 0.02, 0.04]\nind = np.arange(4)\n\n\n#replace these with the data obtained from identification.py\nk2r1 = [0.0, 0.0, 0.0008000000000000229, 0.0015999999999999348]\nk2r2 = [0.0, 0.0, 0.0008000000000000229, 0.0033000000000004137]\nk2r3 = [0.0, 0.000200000000000089, 0.001200000000000201, 0.0049000000000002375]\nk3r1 = [0.0, 0.0, 0.0, 0.0]\nk3r2 = [0.0, 0.0, 0.0, 9.999999999987796e-05]\nk3r3 = [0.0, 0.0, 0.0, 0.000200000000000089]\n\nfont = {'size' : 17}\nplt.rc('font', **font)\nplt.gcf().subplots_adjust(bottom=0.15)\nplt.gcf().subplots_adjust(left=0.20)\n\nplt.plot(ind, k2r1, label = \"k=2,r=1\", marker=\"X\", markersize=10, linewidth=5)\nplt.plot(ind, k2r2, label = \"k=2,r=2\", marker=\"X\", markersize=10, linewidth=5)\nplt.plot(ind, k2r3, label = \"k=2,r=3\", marker=\"X\", markersize=10, linewidth=5)\n\nplt.plot(ind, k3r1, label = \"k=3,r=1\", marker=\"X\", markersize=10, linewidth=5)\nplt.plot(ind, k3r2, label = \"k=3,r=2\", marker=\"X\", markersize=10, linewidth=5)\nplt.plot(ind, k3r3, label = \"k=3,r=3\", marker=\"X\", markersize=10, linewidth=5)\n\nplt.xticks(ind, ('0.5', '1', '2', '4'))\nplt.xlabel('Malice rate (%)', fontsize='large')\nplt.ylabel('Probability of identification', fontsize='large')\nplt.legend()\nplt.savefig('../new_graphs/Identification.pdf', format='pdf')\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.arange", "matplotlib.pyplot.rc", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ] ]
twobackfromtheend/carball
[ "6dcc3f7f0f2266cc3e0a3de24deaac2aec392b73" ]
[ "carball/analysis2/stats/demo_stats.py" ]
[ "from collections import Counter\nfrom typing import Dict, List\n\nimport numpy as np\nimport pandas as pd\n\nfrom api.analysis.stats_pb2 import PlayerStats\nfrom api.events.demo_pb2 import Demo\nfrom api.game.game_pb2 import Game\nfrom carball.analysis2.constants.constants import FIELD_Y_LIM, FIELD_X_LIM\n\n\ndef set_demo_stats(player_stats: Dict[str, PlayerStats],\n game: Game, demos: List[Demo],\n player_blue_data_frames: Dict[str, pd.DataFrame]):\n player_id_to_name: Dict[str, str] = {player.id.id: player.name for player in game.players}\n\n demo_counts = Counter()\n demoed_counts = Counter()\n demos_near_opponent_goal_counts = Counter()\n demoed_near_own_goal_counts = Counter()\n active_frames = list(player_blue_data_frames.values())[0].index\n\n for demo in demos:\n frame_number = demo.frame_number\n if frame_number not in active_frames:\n continue\n attacker_id = demo.attacker_id.id\n victim_id = demo.victim_id.id\n\n demo_counts[attacker_id] += 1\n demoed_counts[victim_id] += 1\n\n victim_blue_df = player_blue_data_frames[victim_id]\n\n victim_name = player_id_to_name[victim_id]\n victim_position_at_demo = victim_blue_df.loc[frame_number - 1, (victim_name, ['pos_x', 'pos_y'])].values\n BLUE_GOAL_POSITION = np.array([0, -FIELD_Y_LIM])\n\n victim_distance_from_goal = ((victim_position_at_demo - BLUE_GOAL_POSITION) ** 2).sum() ** 0.5\n if victim_distance_from_goal < FIELD_X_LIM / 2:\n demos_near_opponent_goal_counts[attacker_id] += 1\n demoed_near_own_goal_counts[victim_id] += 1\n\n for player_id, _player_stats in player_stats.items():\n _player_stats.demos = demo_counts[player_id]\n _player_stats.demoed = demoed_counts[player_id]\n _player_stats.demos_near_opponent_goal = demos_near_opponent_goal_counts[player_id]\n _player_stats.demoed_near_own_goal = demoed_near_own_goal_counts[player_id]\n" ]
[ [ "numpy.array" ] ]
AndrewArnett/lambdata
[ "fe7e2694a0a099f9df88807f744556c230e9f18d" ]
[ "lambdata_andrewarnett/__init__.py" ]
[ "\"\"\"\nlambdata - a collection of Data Science helper functions\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nfrom lambdata_andrewarnett.dataframe_helper import shape_head, baseline\n\nTEST = pd.DataFrame(np.ones(10))\n" ]
[ [ "numpy.ones" ] ]
LiTszOn/GraphSAGE
[ "dbeb50d52e8d242b3c4ad3e4264c168a2c406e70" ]
[ "graphsage/unsupervised_train.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport tensorflow as tf\nimport numpy as np\n\nfrom graphsage.models import SampleAndAggregate, SAGEInfo, Node2VecModel\nfrom graphsage.minibatch import EdgeMinibatchIterator\nfrom graphsage.neigh_samplers import UniformNeighborSampler\nfrom graphsage.utils import load_data\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n\n# Set random seed\nseed = 123\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\n# Settings\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n \"\"\"Whether to log device placement.\"\"\")\n#core params..\nflags.DEFINE_string('model', 'graphsage', 'model names. See README for possible values.') \nflags.DEFINE_float('learning_rate', 0.00001, 'initial learning rate.')\nflags.DEFINE_string(\"model_size\", \"small\", \"Can be big or small; model specific def'ns\")\nflags.DEFINE_string('train_prefix', '', 'name of the object file that stores the training data. must be specified.')\n\n# left to default values in main experiments \nflags.DEFINE_integer('epochs', 1, 'number of epochs to train.')\nflags.DEFINE_float('dropout', 0.0, 'dropout rate (1 - keep probability).')\nflags.DEFINE_float('weight_decay', 0.0, 'weight for l2 loss on embedding matrix.')\nflags.DEFINE_integer('max_degree', 100, 'maximum node degree.')\nflags.DEFINE_integer('samples_1', 25, 'number of samples in layer 1')\nflags.DEFINE_integer('samples_2', 10, 'number of users samples in layer 2')\nflags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')\nflags.DEFINE_integer('dim_2', 128, 'Size of output dim (final is 2x this, if using concat)')\nflags.DEFINE_boolean('random_context', True, 'Whether to use random context or direct edges')\nflags.DEFINE_integer('neg_sample_size', 20, 'number of negative samples')\nflags.DEFINE_integer('batch_size', 512, 'minibatch size.')\nflags.DEFINE_integer('n2v_test_epochs', 1, 'Number of new SGD epochs for n2v.')\nflags.DEFINE_integer('identity_dim', 0, 'Set to positive value to use identity embedding features of that dimension. 
Default 0.')\n\n#logging, saving, validation settings etc.\nflags.DEFINE_boolean('save_embeddings', True, 'whether to save embeddings for all nodes after training')\nflags.DEFINE_string('base_log_dir', '.', 'base directory for logging and saving embeddings')\nflags.DEFINE_integer('validate_iter', 5000, \"how often to run a validation minibatch.\")\nflags.DEFINE_integer('validate_batch_size', 256, \"how many nodes per validation sample.\")\nflags.DEFINE_integer('gpu', 1, \"which gpu to use.\")\nflags.DEFINE_integer('print_every', 50, \"How often to print training info.\")\nflags.DEFINE_integer('max_total_steps', 10**10, \"Maximum total number of iterations\")\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=str(FLAGS.gpu)\n\nGPU_MEM_FRACTION = 0.8\n\ndef log_dir():\n log_dir = FLAGS.base_log_dir + \"/unsup-\" + FLAGS.train_prefix.split(\"/\")[-2]\n log_dir += \"/{model:s}_{model_size:s}_{lr:0.6f}/\".format(\n model=FLAGS.model,\n model_size=FLAGS.model_size,\n lr=FLAGS.learning_rate)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n return log_dir\n\n# Define model evaluation function\ndef evaluate(sess, model, minibatch_iter, size=None):\n t_test = time.time()\n feed_dict_val = minibatch_iter.val_feed_dict(size)\n outs_val = sess.run([model.loss, model.ranks, model.mrr], \n feed_dict=feed_dict_val)\n return outs_val[0], outs_val[1], outs_val[2], (time.time() - t_test)\n\ndef incremental_evaluate(sess, model, minibatch_iter, size):\n t_test = time.time()\n finished = False\n val_losses = []\n val_mrrs = []\n iter_num = 0\n while not finished:\n feed_dict_val, finished, _ = minibatch_iter.incremental_val_feed_dict(size, iter_num)\n iter_num += 1\n outs_val = sess.run([model.loss, model.ranks, model.mrr], \n feed_dict=feed_dict_val)\n val_losses.append(outs_val[0])\n val_mrrs.append(outs_val[2])\n return np.mean(val_losses), np.mean(val_mrrs), (time.time() - t_test)\n\ndef save_val_embeddings(sess, model, minibatch_iter, size, out_dir, mod=\"\"):\n val_embeddings = []\n finished = False\n seen = set([])\n nodes = []\n iter_num = 0\n name = \"val\"\n while not finished:\n feed_dict_val, finished, edges = minibatch_iter.incremental_embed_feed_dict(size, iter_num)\n iter_num += 1\n outs_val = sess.run([model.loss, model.mrr, model.outputs1], \n feed_dict=feed_dict_val)\n #ONLY SAVE FOR embeds1 because of planetoid\n for i, edge in enumerate(edges):\n if not edge[0] in seen:\n val_embeddings.append(outs_val[-1][i,:])\n nodes.append(edge[0])\n seen.add(edge[0])\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n val_embeddings = np.vstack(val_embeddings)\n np.save(out_dir + name + mod + \".npy\", val_embeddings)\n with open(out_dir + name + mod + \".txt\", \"w\") as fp:\n fp.write(\"\\n\".join(map(str,nodes)))\n\ndef construct_placeholders():\n # Define placeholders\n placeholders = {\n 'batch1' : tf.placeholder(tf.int32, shape=(None), name='batch1'),\n 'batch2' : tf.placeholder(tf.int32, shape=(None), name='batch2'),\n # negative samples for all nodes in the batch\n 'neg_samples': tf.placeholder(tf.int32, shape=(None,),\n name='neg_sample_size'),\n 'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),\n 'batch_size' : tf.placeholder(tf.int32, name='batch_size'),\n }\n return placeholders\n\ndef train(train_data, test_data=None):\n G = train_data[0]\n features = train_data[1]\n id_map = train_data[2]\n\n print(\"G: \" + str(G))\n print(\"features: \" + str(features))\n print(\"id_map: \" + str(id_map))\n if not features is None:\n # pad with dummy zero vector\n features = 
np.vstack([features, np.zeros((features.shape[1],))])\n\n context_pairs = train_data[3] if FLAGS.random_context else None\n placeholders = construct_placeholders() #returns a dictionary of placeholder\n minibatch = EdgeMinibatchIterator(G, #produce a bunch of minibatch\n id_map,\n placeholders, batch_size=FLAGS.batch_size,\n max_degree=FLAGS.max_degree, \n num_neg_samples=FLAGS.neg_sample_size,\n context_pairs = context_pairs) #a useful object\n adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape)\n adj_info = tf.Variable(adj_info_ph, trainable=False, name=\"adj_info\")\n # with tf.Session() as sess:\n # sess.run(tf.global_variables_initializer())\n # sess.run(tf.local_variables_initializer())\n # print(\"adj_info: \" + str(sess.run(adj_info)))\n if FLAGS.model == 'graphsage_mean':\n # Create model\n sampler = UniformNeighborSampler(adj_info)#to wrap the lookup function\n layer_infos = [SAGEInfo(\"node\", sampler, FLAGS.samples_1, FLAGS.dim_1),\n SAGEInfo(\"node\", sampler, FLAGS.samples_2, FLAGS.dim_2)]\n\n model = SampleAndAggregate(placeholders, \n features,\n adj_info,\n minibatch.deg,\n layer_infos=layer_infos, \n model_size=FLAGS.model_size,\n identity_dim = FLAGS.identity_dim,\n logging=True) #set training parameters and define loss function etc\n elif FLAGS.model == 'gcn':\n # Create model\n sampler = UniformNeighborSampler(adj_info)\n layer_infos = [SAGEInfo(\"node\", sampler, FLAGS.samples_1, 2*FLAGS.dim_1),\n SAGEInfo(\"node\", sampler, FLAGS.samples_2, 2*FLAGS.dim_2)]\n\n model = SampleAndAggregate(placeholders, \n features,\n adj_info,\n minibatch.deg,\n layer_infos=layer_infos, \n aggregator_type=\"gcn\",\n model_size=FLAGS.model_size,\n identity_dim = FLAGS.identity_dim,\n concat=False,\n logging=True)\n\n elif FLAGS.model == 'graphsage_seq':\n sampler = UniformNeighborSampler(adj_info)\n layer_infos = [SAGEInfo(\"node\", sampler, FLAGS.samples_1, FLAGS.dim_1),\n SAGEInfo(\"node\", sampler, FLAGS.samples_2, FLAGS.dim_2)]\n\n model = SampleAndAggregate(placeholders, \n features,\n adj_info,\n minibatch.deg,\n layer_infos=layer_infos, \n identity_dim = FLAGS.identity_dim,\n aggregator_type=\"seq\",\n model_size=FLAGS.model_size,\n logging=True)\n\n elif FLAGS.model == 'graphsage_maxpool':\n sampler = UniformNeighborSampler(adj_info)\n layer_infos = [SAGEInfo(\"node\", sampler, FLAGS.samples_1, FLAGS.dim_1),\n SAGEInfo(\"node\", sampler, FLAGS.samples_2, FLAGS.dim_2)]\n\n model = SampleAndAggregate(placeholders, \n features,\n adj_info,\n minibatch.deg,\n layer_infos=layer_infos, \n aggregator_type=\"maxpool\",\n model_size=FLAGS.model_size,\n identity_dim = FLAGS.identity_dim,\n logging=True)\n elif FLAGS.model == 'graphsage_meanpool':\n sampler = UniformNeighborSampler(adj_info)\n layer_infos = [SAGEInfo(\"node\", sampler, FLAGS.samples_1, FLAGS.dim_1),\n SAGEInfo(\"node\", sampler, FLAGS.samples_2, FLAGS.dim_2)]\n\n model = SampleAndAggregate(placeholders, \n features,\n adj_info,\n minibatch.deg,\n layer_infos=layer_infos, \n aggregator_type=\"meanpool\",\n model_size=FLAGS.model_size,\n identity_dim = FLAGS.identity_dim,\n logging=True)\n\n elif FLAGS.model == 'n2v':\n model = Node2VecModel(placeholders, features.shape[0],\n minibatch.deg,\n #2x because graphsage uses concat\n nodevec_dim=2*FLAGS.dim_1,\n lr=FLAGS.learning_rate)\n else:\n raise Exception('Error: model name unrecognized.')\n\n config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)\n config.gpu_options.allow_growth = True\n #config.gpu_options.per_process_gpu_memory_fraction = 
GPU_MEM_FRACTION\n config.allow_soft_placement = True\n \n # Initialize session\n sess = tf.Session(config=config)\n merged = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(log_dir(), sess.graph)\n \n # Init variables\n sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj})\n \n # Train model\n \n train_shadow_mrr = None\n shadow_mrr = None\n\n total_steps = 0\n avg_time = 0.0\n epoch_val_costs = []\n\n train_adj_info = tf.assign(adj_info, minibatch.adj)\n val_adj_info = tf.assign(adj_info, minibatch.test_adj)\n for epoch in range(FLAGS.epochs): \n minibatch.shuffle() \n\n iter = 0\n print('Epoch: %04d' % (epoch + 1))\n epoch_val_costs.append(0)\n while not minibatch.end():\n # Construct feed dictionary\n feed_dict = minibatch.next_minibatch_feed_dict()\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n\n t = time.time()\n # Training step\n outs = sess.run([merged, model.opt_op, model.loss, model.ranks, model.aff_all, \n model.mrr, model.outputs1], feed_dict=feed_dict)\n train_cost = outs[2]\n train_mrr = outs[5]\n if train_shadow_mrr is None:\n train_shadow_mrr = train_mrr#\n else:\n train_shadow_mrr -= (1-0.99) * (train_shadow_mrr - train_mrr)\n\n if iter % FLAGS.validate_iter == 0:\n # Validation\n sess.run(val_adj_info.op)\n val_cost, ranks, val_mrr, duration = evaluate(sess, model, minibatch, size=FLAGS.validate_batch_size)\n sess.run(train_adj_info.op)\n epoch_val_costs[-1] += val_cost\n if shadow_mrr is None:\n shadow_mrr = val_mrr\n else:\n shadow_mrr -= (1-0.99) * (shadow_mrr - val_mrr)\n\n if total_steps % FLAGS.print_every == 0:\n summary_writer.add_summary(outs[0], total_steps)\n \n # Print results\n avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1)\n\n if total_steps % FLAGS.print_every == 0:\n print(\"Iter:\", '%04d' % iter, \n \"train_loss=\", \"{:.5f}\".format(train_cost),\n \"train_mrr=\", \"{:.5f}\".format(train_mrr),#Mean reciprocal rank\n \"train_mrr_ema=\", \"{:.5f}\".format(train_shadow_mrr), # exponential moving average\n \"val_loss=\", \"{:.5f}\".format(val_cost),\n \"val_mrr=\", \"{:.5f}\".format(val_mrr), \n \"val_mrr_ema=\", \"{:.5f}\".format(shadow_mrr), # exponential moving average\n \"time=\", \"{:.5f}\".format(avg_time))\n\n iter += 1\n total_steps += 1\n\n if total_steps > FLAGS.max_total_steps:\n break\n\n if total_steps > FLAGS.max_total_steps:\n break\n \n print(\"Optimization Finished!\")\n if FLAGS.save_embeddings:\n sess.run(val_adj_info.op)\n\n save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir())\n\n if FLAGS.model == \"n2v\":\n # stopping the gradient for the already trained nodes\n train_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if not G.node[n]['val'] and not G.node[n]['test']],\n dtype=tf.int32)\n test_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if G.node[n]['val'] or G.node[n]['test']], \n dtype=tf.int32)\n update_nodes = tf.nn.embedding_lookup(model.context_embeds, tf.squeeze(test_ids))\n no_update_nodes = tf.nn.embedding_lookup(model.context_embeds,tf.squeeze(train_ids))\n update_nodes = tf.scatter_nd(test_ids, update_nodes, tf.shape(model.context_embeds))\n no_update_nodes = tf.stop_gradient(tf.scatter_nd(train_ids, no_update_nodes, tf.shape(model.context_embeds)))\n model.context_embeds = update_nodes + no_update_nodes\n sess.run(model.context_embeds)\n\n # run random walks\n from graphsage.utils import run_random_walks\n nodes = [n for n in G.nodes_iter() if G.node[n][\"val\"] or G.node[n][\"test\"]]\n 
start_time = time.time()\n pairs = run_random_walks(G, nodes, num_walks=50)\n walk_time = time.time() - start_time\n\n test_minibatch = EdgeMinibatchIterator(G, \n id_map,\n placeholders, batch_size=FLAGS.batch_size,\n max_degree=FLAGS.max_degree, \n num_neg_samples=FLAGS.neg_sample_size,\n context_pairs = pairs,\n n2v_retrain=True,\n fixed_n2v=True)\n \n start_time = time.time()\n print(\"Doing test training for n2v.\")\n test_steps = 0\n for epoch in range(FLAGS.n2v_test_epochs):\n test_minibatch.shuffle()\n while not test_minibatch.end():\n feed_dict = test_minibatch.next_minibatch_feed_dict()\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n outs = sess.run([model.opt_op, model.loss, model.ranks, model.aff_all, \n model.mrr, model.outputs1], feed_dict=feed_dict)\n if test_steps % FLAGS.print_every == 0:\n print(\"Iter:\", '%04d' % test_steps, \n \"train_loss=\", \"{:.5f}\".format(outs[1]),\n \"train_mrr=\", \"{:.5f}\".format(outs[-2]))\n test_steps += 1\n train_time = time.time() - start_time\n save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir(), mod=\"-test\")\n print(\"Total time: \", train_time+walk_time)\n print(\"Walk time: \", walk_time)\n print(\"Train time: \", train_time)\n\n \n\ndef main(argv=None):\n print(\"Loading training data..\")\n train_data = load_data(FLAGS.train_prefix, load_walks=True) # for processing Redit's data (Reddit's data is a bit wired)\n print(\"Done loading training data..\")\n train(train_data)\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "numpy.random.seed", "tensorflow.Variable", "tensorflow.shape", "tensorflow.placeholder_with_default", "tensorflow.assign", "tensorflow.placeholder", "numpy.save", "tensorflow.squeeze", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "numpy.mean", "tensorflow.Session", "tensorflow.set_random_seed", "tensorflow.app.flags.DEFINE_boolean", "numpy.zeros", "numpy.vstack", "tensorflow.app.run" ] ]
Senwang98/Lightweight-Detection-and-KD
[ "7d6a4c02d922d4ed0920c9108f1f06dd63c5e90b" ]
[ "mmdet/distillation/distillers/csd_distiller.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom mmdet.models.detectors.base import BaseDetector\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint, _load_checkpoint, load_state_dict\nfrom ..builder import DISTILLER, build_distill_loss\nfrom collections import OrderedDict\n\n\[email protected]_module()\nclass CSD_DetectionDistiller(BaseDetector):\n \"\"\"Base distiller for detectors.\n\n It typically consists of teacher_model and student_model.\n \"\"\"\n\n def __init__(self,\n teacher_cfg,\n student_cfg,\n distill_cfg=None,\n teacher_pretrained=None,\n init_student=False):\n\n super(CSD_DetectionDistiller, self).__init__()\n\n self.teacher = build_detector(teacher_cfg.model,\n train_cfg=teacher_cfg.get('train_cfg'),\n test_cfg=teacher_cfg.get('test_cfg'))\n self.init_weights_teacher(teacher_pretrained)\n self.teacher.eval()\n\n self.student = build_detector(student_cfg.model,\n train_cfg=student_cfg.get('train_cfg'),\n test_cfg=student_cfg.get('test_cfg'))\n # inheriting strategy\n if init_student:\n t_checkpoint = _load_checkpoint(teacher_pretrained)\n all_name = []\n for name, v in t_checkpoint[\"state_dict\"].items():\n if name.startswith(\"backbone.\"):\n continue\n else:\n all_name.append((name, v))\n\n state_dict = OrderedDict(all_name)\n load_state_dict(self.student, state_dict)\n\n self.distill_losses = nn.ModuleDict()\n self.distill_cfg = distill_cfg\n for item_loc in distill_cfg:\n for item_loss in item_loc.methods:\n loss_name = item_loss.name\n self.distill_losses[loss_name] = build_distill_loss(item_loss)\n\n def base_parameters(self):\n return nn.ModuleList([self.student, self.distill_losses])\n\n @property\n def with_neck(self):\n \"\"\"bool: whether the detector has a neck\"\"\"\n return hasattr(self.student, 'neck') and self.student.neck is not None\n\n @property\n def with_shared_head(self):\n \"\"\"bool: whether the detector has a shared head in the RoI Head\"\"\"\n return hasattr(self.student, 'roi_head') and self.student.roi_head.with_shared_head\n\n @property\n def with_bbox(self):\n \"\"\"bool: whether the detector has a bbox head\"\"\"\n return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_bbox)\n or (hasattr(self.student, 'bbox_head') and self.student.bbox_head is not None))\n\n @property\n def with_mask(self):\n \"\"\"bool: whether the detector has a mask head\"\"\"\n return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_mask)\n or (hasattr(self.student, 'mask_head') and self.student.mask_head is not None))\n\n def init_weights_teacher(self, path=None):\n \"\"\"Load the pretrained model in teacher detector.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n checkpoint = load_checkpoint(self.teacher, path, map_location='cpu')\n\n def forward_train(self, img, img_metas, **kwargs):\n \"\"\"\n Args:\n img (Tensor): Input images of shape (N, C, H, W).\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): A List of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n :class:`mmdet.datasets.pipelines.Collect`.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components(student's losses and distiller's losses).\n \"\"\"\n\n with torch.no_grad():\n self.teacher.eval()\n fea_t = self.teacher.extract_feat(img)\n\n student_feat = 
self.student.extract_feat(img)\n student_loss = self.student.bbox_head.forward_train(\n student_feat, img_metas, **kwargs)\n\n for i in range(len(student_feat)):\n loss_name = 'loss_csd_fpn_'+str(i)\n student_loss[loss_name] = self.distill_losses[loss_name](\n student_feat[i], fea_t[i].detach(), kwargs['gt_bboxes'], img_metas)\n\n return student_loss\n\n def simple_test(self, img, img_metas, **kwargs):\n return self.student.simple_test(img, img_metas, **kwargs)\n\n def aug_test(self, imgs, img_metas, **kwargs):\n return self.student.aug_test(imgs, img_metas, **kwargs)\n\n def extract_feat(self, imgs):\n \"\"\"Extract features from images.\"\"\"\n return self.student.extract_feat(imgs)\n" ]
[ [ "torch.nn.ModuleDict", "torch.no_grad", "torch.nn.ModuleList" ] ]
itcthienkhiem/myANPR
[ "e0a76b2165d539c6a38f51f7485f37349a85a074" ]
[ "ANPR.py" ]
[ "\r\ntry:\r\n import cv2\r\nexcept ImportError:\r\n print (\"You must have OpenCV installed\")\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#Image(filename='../../../data/ANPR/sample_plates.png')\r\n\r\ndef showfig(image, ucmap):\r\n #There is a difference in pixel ordering in OpenCV and Matplotlib.\r\n #OpenCV follows BGR order, while matplotlib follows RGB order.\r\n if len(image.shape)==3 :\r\n b,g,r = cv2.split(image) # get b,g,r\r\n image = cv2.merge([r,g,b]) # switch it to rgb\r\n imgplot=plt.imshow(image, ucmap)\r\n imgplot.axes.get_xaxis().set_visible(False)\r\n imgplot.axes.get_yaxis().set_visible(False)\r\n plt.show()\r\n\r\n\r\n\r\nplt.rcParams['figure.figsize'] = 10, 10\r\nplt.title('Sample Car')\r\nimage_path=\"out.jpg\"\r\ncarsample=cv2.imread(image_path)\r\nshowfig(carsample,None)\r\n\r\nplt.rcParams['figure.figsize'] = 7,7\r\n\r\n# convert into grayscale\r\ngray_carsample=cv2.cvtColor(carsample, cv2.COLOR_BGR2GRAY)\r\nshowfig(gray_carsample, plt.get_cmap('gray'))\r\n# blur the image\r\nblur=cv2.GaussianBlur(gray_carsample,(5,5),0)\r\nshowfig(blur, plt.get_cmap('gray'))\r\n# find the sobel gradient. use the kernel size to be 3\r\nsobelx=cv2.Sobel(blur, cv2.CV_8U, 1, 0, ksize=3)\r\nshowfig(sobelx, plt.get_cmap('gray'))\r\n#Otsu thresholding\r\n_,th2=cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\nshowfig(th2, plt.get_cmap('gray'))\r\n#Morphological Closing\r\nse=cv2.getStructuringElement(cv2.MORPH_RECT,(23,2))\r\nclosing=cv2.morphologyEx(th2, cv2.MORPH_CLOSE, se)\r\nshowfig(closing, plt.get_cmap('gray'))\r\n_,contours,_=cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\nfor cnt in contours:\r\n rect=cv2.minAreaRect(cnt)\r\n box=cv2.boxPoints(rect)\r\n box=np.int0(box)\r\n cv2.drawContours(carsample, [box], 0, (0,255,0),2)\r\nshowfig(carsample, None)\r\n\r\ndef validate(cnt):\r\n rect=cv2.minAreaRect(cnt)\r\n box=cv2.boxPoints(rect)\r\n box=np.int0(box)\r\n output=False\r\n width=rect[1][0]\r\n height=rect[1][1]\r\n if ((width!=0) & (height!=0)):\r\n if (((height/width>2) & (height>width)) | ((width/height>2) & (width>height))):\r\n if((height*width<16000) & (height*width>1000)):\r\n output=True\r\n\r\n return output\r\n\r\n#Lets draw validated contours with red.\r\nfor cnt in contours:\r\n if validate(cnt):\r\n rect=cv2.minAreaRect(cnt)\r\n box=cv2.boxPoints(rect)\r\n box=np.int0(box)\r\n cv2.drawContours(carsample, [box], 0, (0,0,255),2)\r\nshowfig(carsample, None)\r\n" ]
[ [ "numpy.int0", "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.show" ] ]
ankitshah009/youtube-8m-1
[ "a0f28c9ca05b72ca709322f2c4871a4345a69fbb" ]
[ "readers.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides readers configured for different datasets.\"\"\"\n\nimport tensorflow as tf\n\ntry:\n # relative imports on gcloud (as a module)\n from . import utils\nexcept ImportError:\n # relative imports locally (as a script) \n import utils\n\nfrom tensorflow import logging\ndef resize_axis(tensor, axis, new_size, fill_value=0):\n \"\"\"Truncates or pads a tensor to new_size on on a given axis.\n\n Truncate or extend tensor such that tensor.shape[axis] == new_size. If the\n size increases, the padding will be performed at the end, using fill_value.\n\n Args:\n tensor: The tensor to be resized.\n axis: An integer representing the dimension to be sliced.\n new_size: An integer or 0d tensor representing the new value for\n tensor.shape[axis].\n fill_value: Value to use to fill any new entries in the tensor. Will be\n cast to the type of tensor.\n\n Returns:\n The resized tensor.\n \"\"\"\n tensor = tf.convert_to_tensor(tensor)\n shape = tf.unstack(tf.shape(tensor))\n\n pad_shape = shape[:]\n pad_shape[axis] = tf.maximum(0, new_size - shape[axis])\n\n shape[axis] = tf.minimum(shape[axis], new_size)\n shape = tf.stack(shape)\n\n resized = tf.concat([\n tf.slice(tensor, tf.zeros_like(shape), shape),\n tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))\n ], axis)\n\n # Update shape.\n new_shape = tensor.get_shape().as_list() # A copy is being made.\n new_shape[axis] = new_size\n resized.set_shape(new_shape)\n return resized\n\nclass BaseReader(object):\n \"\"\"Inherit from this class when implementing new readers.\"\"\"\n\n def prepare_reader(self, unused_filename_queue):\n \"\"\"Create a thread for generating prediction and label tensors.\"\"\"\n raise NotImplementedError()\n\n\nclass YT8MAggregatedFeatureReader(BaseReader):\n \"\"\"Reads TFRecords of pre-aggregated Examples.\n\n The TFRecords must contain Examples with a sparse int64 'labels' feature and\n a fixed length float32 feature, obtained from the features in 'feature_name'.\n The float features are assumed to be an average of dequantized values.\n \"\"\"\n\n def __init__(self,\n num_classes=3862,\n feature_sizes=[1024, 128],\n feature_names=[\"mean_rgb\", \"mean_audio\"]):\n \"\"\"Construct a YT8MAggregatedFeatureReader.\n\n Args:\n num_classes: a positive integer for the number of classes.\n feature_sizes: positive integer(s) for the feature dimensions as a list.\n feature_names: the feature name(s) in the tensorflow record as a list.\n \"\"\"\n\n assert len(feature_names) == len(feature_sizes), \\\n \"length of feature_names (={}) != length of feature_sizes (={})\".format( \\\n len(feature_names), len(feature_sizes))\n\n self.num_classes = num_classes\n self.feature_sizes = feature_sizes\n self.feature_names = feature_names\n\n def prepare_reader(self, filename_queue, batch_size=1024):\n \"\"\"Creates a single reader thread for pre-aggregated YouTube 8M Examples.\n\n Args:\n filename_queue: A tensorflow queue of 
filename locations.\n\n Returns:\n A tuple of video indexes, features, labels, and padding data.\n \"\"\"\n reader = tf.TFRecordReader()\n _, serialized_examples = reader.read_up_to(filename_queue, batch_size)\n\n tf.add_to_collection(\"serialized_examples\", serialized_examples)\n return self.prepare_serialized_examples(serialized_examples)\n\n def prepare_serialized_examples(self, serialized_examples):\n # set the mapping from the fields to data types in the proto\n num_features = len(self.feature_names)\n assert num_features > 0, \"self.feature_names is empty!\"\n assert len(self.feature_names) == len(self.feature_sizes), \\\n \"length of feature_names (={}) != length of feature_sizes (={})\".format( \\\n len(self.feature_names), len(self.feature_sizes))\n\n feature_map = {\"id\": tf.FixedLenFeature([], tf.string),\n \"labels\": tf.VarLenFeature(tf.int64)}\n for feature_index in range(num_features):\n feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(\n [self.feature_sizes[feature_index]], tf.float32)\n\n features = tf.parse_example(serialized_examples, features=feature_map)\n labels = tf.sparse_to_indicator(features[\"labels\"], self.num_classes)\n labels.set_shape([None, self.num_classes])\n concatenated_features = tf.concat([\n features[feature_name] for feature_name in self.feature_names], 1)\n\n return features[\"id\"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])\n\nclass YT8MFrameFeatureReader(BaseReader):\n \"\"\"Reads TFRecords of SequenceExamples.\n\n The TFRecords must contain SequenceExamples with the sparse in64 'labels'\n context feature and a fixed length byte-quantized feature vector, obtained\n from the features in 'feature_names'. The quantized features will be mapped\n back into a range between min_quantized_value and max_quantized_value.\n \"\"\"\n\n def __init__(self,\n num_classes=3862,\n feature_sizes=[1024, 128],\n feature_names=[\"rgb\", \"audio\"],\n max_frames=300,\n float16_flag=False):\n \"\"\"Construct a YT8MFrameFeatureReader.\n\n Args:\n num_classes: a positive integer for the number of classes.\n feature_sizes: positive integer(s) for the feature dimensions as a list.\n feature_names: the feature name(s) in the tensorflow record as a list.\n max_frames: the maximum number of frames to process.\n \"\"\"\n\n assert len(feature_names) == len(feature_sizes), \\\n \"length of feature_names (={}) != length of feature_sizes (={})\".format( \\\n len(feature_names), len(feature_sizes))\n\n self.num_classes = num_classes\n self.feature_sizes = feature_sizes\n self.feature_names = feature_names\n self.max_frames = max_frames\n self.float16_flag = float16_flag\n\n def get_video_matrix(self,\n features,\n feature_size,\n max_frames,\n max_quantized_value,\n min_quantized_value):\n \"\"\"Decodes features from an input string and quantizes it.\n\n Args:\n features: raw feature values\n feature_size: length of each frame feature vector\n max_frames: number of frames (rows) in the output feature_matrix\n max_quantized_value: the maximum of the quantized value.\n min_quantized_value: the minimum of the quantized value.\n\n Returns:\n feature_matrix: matrix of all frame-features\n num_frames: number of frames in the sequence\n \"\"\"\n \n dtype = tf.float16 if self.float16_flag else tf.float32\n \n decoded_features = tf.reshape(\n tf.cast(tf.decode_raw(features, tf.uint8), dtype),\n [-1, feature_size])\n\n num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)\n feature_matrix = 
utils.Dequantize(decoded_features,\n max_quantized_value,\n min_quantized_value)\n feature_matrix = resize_axis(feature_matrix, 0, max_frames)\n return feature_matrix, num_frames\n\n def prepare_reader(self,\n filename_queue,\n max_quantized_value=2,\n min_quantized_value=-2):\n \"\"\"Creates a single reader thread for YouTube8M SequenceExamples.\n\n Args:\n filename_queue: A tensorflow queue of filename locations.\n max_quantized_value: the maximum of the quantized value.\n min_quantized_value: the minimum of the quantized value.\n\n Returns:\n A tuple of video indexes, video features, labels, and padding data.\n \"\"\"\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n return self.prepare_serialized_examples(serialized_example,\n max_quantized_value, min_quantized_value)\n\n def prepare_serialized_examples(self, serialized_example,\n max_quantized_value=2, min_quantized_value=-2):\n\n contexts, features = tf.parse_single_sequence_example(\n serialized_example,\n context_features={\"id\": tf.FixedLenFeature(\n [], tf.string),\n \"labels\": tf.VarLenFeature(tf.int64)},\n sequence_features={\n feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)\n for feature_name in self.feature_names\n })\n\n # read ground truth labels\n labels = (tf.cast(\n tf.sparse_to_dense(contexts[\"labels\"].values, (self.num_classes,), 1,\n validate_indices=False),\n tf.bool))\n\n # loads (potentially) different types of features and concatenates them\n num_features = len(self.feature_names)\n assert num_features > 0, \"No feature selected: feature_names is empty!\"\n\n assert len(self.feature_names) == len(self.feature_sizes), \\\n \"length of feature_names (={}) != length of feature_sizes (={})\".format( \\\n len(self.feature_names), len(self.feature_sizes))\n\n num_frames = -1 # the number of frames in the video\n feature_matrices = [None] * num_features # an array of different features\n for feature_index in range(num_features):\n feature_matrix, num_frames_in_this_feature = self.get_video_matrix(\n features[self.feature_names[feature_index]],\n self.feature_sizes[feature_index],\n self.max_frames,\n max_quantized_value,\n min_quantized_value)\n if num_frames == -1:\n num_frames = num_frames_in_this_feature\n else:\n tf.assert_equal(num_frames, num_frames_in_this_feature)\n\n feature_matrices[feature_index] = feature_matrix\n\n # cap the number of frames at self.max_frames\n num_frames = tf.minimum(num_frames, self.max_frames)\n\n # concatenate different features\n video_matrix = tf.concat(feature_matrices, 1)\n\n # convert to batch format.\n # TODO: Do proper batch reads to remove the IO bottleneck.\n batch_video_ids = tf.expand_dims(contexts[\"id\"], 0)\n batch_video_matrix = tf.expand_dims(video_matrix, 0)\n batch_labels = tf.expand_dims(labels, 0)\n batch_frames = tf.expand_dims(num_frames, 0)\n\n return batch_video_ids, batch_video_matrix, batch_labels, batch_frames\n\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.FixedLenFeature", "tensorflow.stack", "tensorflow.minimum", "tensorflow.cast", "tensorflow.TFRecordReader", "tensorflow.sparse_to_dense", "tensorflow.decode_raw", "tensorflow.shape", "tensorflow.parse_example", "tensorflow.assert_equal", "tensorflow.zeros_like", "tensorflow.FixedLenSequenceFeature", "tensorflow.VarLenFeature", "tensorflow.add_to_collection", "tensorflow.maximum", "tensorflow.expand_dims", "tensorflow.sparse_to_indicator" ] ]
bbitarello/ldpred
[ "b84b99f23dc83dc164300b8dee6678207461a751" ]
[ "util.py" ]
[ "\"\"\"\nVarious general utility functions.\n\n\"\"\"\nimport scipy as sp\nfrom scipy import stats\nimport pickle\nimport gzip\nimport os\nfrom itertools import takewhile\nfrom itertools import repeat\nimport sys\nimport re\n\n# LDpred currently ignores the Y and MT chromosomes.\nok_chromosomes = set(range(1, 24))\nchromosomes_list = ['chrom_%d' % (chrom) for chrom in ok_chromosomes]\n\nchrom_name_map = {'X':23,'chr_X':23,'chrom_X':23}\nfor chrom in ok_chromosomes:\n chrom_name_map['%d' % (chrom)]=chrom\n chrom_name_map['chrom_%d' % (chrom)]=chrom\n chrom_name_map['chr_%d' % (chrom)]=chrom\n\n\ndef get_chrom_num(chrom):\n return chrom_name_map.get(re.sub(\"chr\", \"\", chrom),0)\n\n#Various auxiliary variables\nambig_nts = set([('A', 'T'), ('T', 'A'), ('G', 'C'), ('C', 'G')])\nopp_strand_dict = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'}\n\nvalid_nts = set(['A', 'T', 'C', 'G'])\n\n\n# LDpred currently ignores the Y and MT chromosomes.\nvalid_chromosomes = ['%d' % (x) for x in range(1, 24)]\nvalid_chromosomes.append('X')\n\nchromosomes_list = ['chrom_%s' % (chrom) for chrom in valid_chromosomes]\n\n\n#Conversion sizes for strings (necessary for using h5py and python 3)\nfids_dtype = '|S64'\niids_dtype = '|S64'\nsids_dtype = \"|S30\" \nnts_dtype = \"|S1\"\n\nsids_u_dtype = '<U30'\nnts_u_dtype = '<U1'\n\n\nmy_path = os.path.dirname(os.path.abspath(__file__))\nhm3_file = os.path.join(my_path, 'reference','hm3_sids.txt.gz')\nlrld_file = os.path.join(my_path, 'reference','long-range-ld-price-2008hg38.txt')\n\ndef check_chromosomes(missing_chromosomes):\n if len(missing_chromosomes) > 0:\n print('Ignored chromosomes:', ','.join(list(missing_chromosomes)))\n print('Please note that only data on chromosomes 1-23, and X is parsed.')\n\n\ndef calc_auc(y_true, y_hat, show_plot=False):\n \"\"\"\n Calculate the Area Under the Curve (AUC) for a predicted and observed case-control phenotype.\n \"\"\"\n y_true = sp.copy(y_true)\n if len(sp.unique(y_true)) == 2:\n y_min = y_true.min()\n y_max = y_true.max()\n if y_min != 0 or y_max != 1:\n print('Transforming back to a dichotomous trait')\n y_true[y_true == y_min] = 0\n y_true[y_true == y_max] = 1\n \n else:\n print('Warning: Calculating AUC for a quantitative phenotype.')\n y_mean = sp.mean(y_true)\n zero_filter = y_true <= y_mean\n one_filter = y_true > y_mean\n y_true[zero_filter] = 0\n y_true[one_filter] = 1\n\n num_cases = sp.sum(y_true == 1)\n num_controls = sp.sum(y_true == 0)\n assert num_cases + num_controls == len(y_true), 'The phenotype is not defined as expected. It is not binary (0 1 case-control status).'\n print('%d cases, %d controls' % (num_cases, num_controls)) \n \n num_indivs = float(len(y_true))\n tot_num_pos = float(sp.sum(y_true))\n tot_num_neg = float(num_indivs - tot_num_pos)\n \n l = y_hat.tolist()\n l.sort(reverse=True)\n roc_x = []\n roc_y = []\n auc = 0.0\n prev_fpr = 0.0\n for thres in l:\n thres_filter = y_hat >= thres\n y_t = y_true[thres_filter]\n n = len(y_t)\n tp = sp.sum(y_t)\n fp = n - tp\n \n fpr = fp / tot_num_neg\n tpr = tp / tot_num_pos\n roc_x.append(fpr)\n roc_y.append(tpr)\n delta_fpr = fpr - prev_fpr\n auc += tpr * delta_fpr\n prev_fpr = fpr\n print('AUC: %0.4f' % auc)\n if show_plot:\n import pylab\n pylab.plot(roc_x, roc_y)\n pylab.show()\n return auc\n\n\n\ndef obs_h2_to_liab(R2_osb,K=0.01,P=0.5):\n \"\"\"\n Transformation from observed to liability scale.\n \n Lee et al. AJHG 2011 conversion? 
\n \n For heritability only\n \"\"\"\n t = stats.norm.ppf(1-K)\n z = stats.norm.pdf(t)\n c = P*(1-P)*z**2/(K**2*(1-K)**2)\n R2_liab = R2_osb/c\n return R2_liab\n\n\ndef obs_r2_to_liab(R2_osb,K=0.01,P=0.5):\n \"\"\"\n Lee et al., Gen Epi 2012 conversion\n \n For R2 only\n\n \"\"\"\n t = stats.norm.ppf(K)\n z = stats.norm.pdf(t)\n m = z/K\n C = (K*(1-K))**2/((z**2)*(P*(1-P)))\n d = m*((P-K)/(1-K))\n theta =d**2 - d*t\n R2_liab_cc = (R2_osb*C)/(1+(R2_osb*C*theta))\n return R2_liab_cc\n \ndef load_hapmap_SNPs():\n f = gzip.open(hm3_file, 'r')\n hm3_sids = pickle.load(f)\n f.close()\n return hm3_sids\n\n\ndef load_lrld_dict():\n #Load Price et al. AJHG 2008 long range LD table.\n d = {}\n for chrom in ok_chromosomes:\n d[chrom] = {'reg_dict':{}}\n with open(lrld_file, 'r') as f:\n for line in f:\n l = line.split()\n d[chrom_name_map[l[0]]][l[3]] = {'start_pos':int(l[1]), 'end_pos':int(l[2])}\n return d\n\n\ndef is_in_lrld(chrom, pos, lrld_dict):\n if len(lrld_dict[chrom]['reg_dict'])==0:\n return False\n else:\n for lrld_reg in lrld_dict[chrom]['reg_dict']:\n if lrld_reg['start_pos'] < pos < lrld_reg['end_pos']:\n return True\n else:\n return False\n\n\ndef get_snp_lrld_status(chromosome, positions, lrld_dict):\n snp_lrld = sp.zeros(len(positions))\n for snp_i in range(len(positions)):\n snp_lrld[snp_i] = is_in_lrld(chromosome, positions[snp_i], lrld_dict)\n return snp_lrld\n \ndef is_gz(name):\n return name.lower().endswith(('.gz', '.gzip'))\n\n\n\ndef count_lines(filename):\n if sys.version_info >= (3,0):\n return count_lines_fast(filename)\n else:\n return count_lines_slow(filename)\n\n\ndef count_lines_fast(filename):\n opener = open\n if is_gz(filename):\n opener = gzip.open\n try:\n with opener(filename, 'rb') as f:\n bufgen = takewhile(lambda x: x, (f.raw.read(1024*1024) for _ in repeat(None)))\n num_lines =sum( buf.count(b'\\n') for buf in bufgen )\n except Exception:\n num_lines = -1\n return num_lines \n \n\ndef count_lines_slow(filename):\n opener = open\n if is_gz(filename):\n opener = gzip.open \n try:\n with opener(filename, 'rb') as f:\n num_lines = sum(1 for line in f)\n except Exception:\n num_lines=-1\n return num_lines\n" ]
[ [ "scipy.stats.norm.ppf", "scipy.stats.norm.pdf", "scipy.sum", "scipy.mean", "scipy.copy", "scipy.unique" ] ]
wangxicoding/edl
[ "75d651e72e5297aba2e597588cf958ea336deb4e" ]
[ "example/distill/nlp/reader.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport codecs\nimport os\nimport csv\nimport sys\n\nfrom paddlehub.dataset import InputExample\nfrom paddlehub.common.dir import DATA_HOME\nfrom paddlehub.dataset.base_nlp_dataset import BaseNLPDataset\nimport paddle as P\nimport paddle.fluid.dygraph as D\nimport numpy as np\n\n\ndef space_tokenizer(i):\n return i.split()\n\n\ndef pad_batch_data(data, dtype, pad_idx=0, max_len=-1):\n if max_len <= 0:\n for s in data:\n if len(s) > max_len:\n max_len = len(s)\n\n inst_data = np.array([\n list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in data\n ])\n\n return np.array(inst_data).astype(dtype)\n\n\nclass ChnSentiCorp(BaseNLPDataset):\n def __init__(self):\n base_path = \"./data/\"\n super(ChnSentiCorp, self).__init__(\n base_path=base_path,\n train_file=\"train.part.0\",\n dev_file=\"dev.part.0\",\n test_file=\"test.part.0\",\n label_file=None,\n label_list=[\"0\", \"1\"], )\n\n self._word_dict = None\n\n def __read_file(self, input_file):\n \"\"\"\n data file format:\n origin sentence\\tword segment sentence\\tlabel\n \"\"\"\n with codecs.open(input_file, \"r\", encoding=\"UTF-8\") as f:\n for line in f:\n line = line.strip()\n if len(line) <= 0:\n continue\n arr = line.split(\"\\t\")\n #print(\"line:\", len(arr))\n yield arr\n\n def _read_file(self, input_file, phase=None):\n \"\"\"\n [(seq_id,label,origin sentence)]\n \"\"\"\n seq_id = 0\n examples = []\n for t in self.__read_file(input_file):\n if len(t) == 2:\n #example = InputExample(\n # guid=seq_id, label=t[1], text_a=t[0])\n #print(\"t2\", t[1])\n assert len(t) != 2, \"data format error:\" + t\n elif len(t) == 3:\n example = InputExample(guid=seq_id, label=t[2], text_a=t[0])\n #print(\"t3\", t[2])\n else:\n assert False, 'invalid format'\n seq_id += 1\n examples.append(example)\n\n return examples\n\n def student_word_dict(self, vocab_file):\n \"\"\"\n {\n word->word_idx\n }\n \"\"\"\n with codecs.open(vocab_file, \"r\", encoding=\"UTF-8\") as f:\n self._word_dict = {\n i.strip(): l\n for l, i in enumerate(f.readlines())\n }\n\n return self._word_dict\n\n def student_reader(self, input_file, word_dict):\n \"\"\"\n return [([segment_sentence_idxs], label, sentence), ()...]\n \"\"\"\n\n def reader():\n input_files = []\n if isinstance(input_file, str):\n input_files.append(input_file)\n else:\n input_files = input_file\n assert isinstance(input_file, list)\n\n for data_file in input_files:\n print(\"open file:\", data_file)\n for t in self.__read_file(data_file):\n s = []\n for word in space_tokenizer(t[1]):\n idx = word_dict[\n word] if word in word_dict else word_dict['[UNK]']\n s.append(idx)\n\n yield s, t[2], t[0]\n\n return reader\n\n def batch_reader(self, input_file, word_dict, batch_size, shuffle=True):\n def reader():\n if shuffle:\n s_reader = P.reader.shuffle(\n self.student_reader(input_file, word_dict),\n buf_size=100000)\n else:\n s_reader = self.student_reader(input_file, word_dict)\n\n b = 
[[], [], []]\n for rec in s_reader():\n if len(b[0]) == batch_size:\n yield b\n b = [[], [], []]\n continue\n\n for i in range(len(rec)):\n b[i].append(rec[i])\n\n if len(b[0]) > 0:\n yield b\n\n return reader\n\n def pad_batch_reader(self, input_file, word_dict, batch_size,\n shuffle=True):\n def reader():\n b_reader = self.batch_reader(\n input_file, word_dict, batch_size, shuffle=shuffle)\n for b in b_reader():\n b[0] = D.base.to_variable(pad_batch_data(b[0], 'int64'))\n b[1] = D.base.to_variable(np.array(b[1]).astype('int64'))\n yield b\n\n return reader\n\n\nif __name__ == '__main__':\n ds = ChnSentiCorp()\n ds._read_file(\"./data/train.part.0\")\n ds.student_reader(\"./data/train.part.0\", \"./data/vocab.bow.txt\")\n" ]
[ [ "numpy.array" ] ]
taotaotao3/only_common
[ "7dd3700d4bf3935c193b0b6f38a0dafa750ad01c" ]
[ "only_common.py" ]
[ "import sys\nimport io\nimport csv\nimport pprint\nimport pandas as pd\nimport pdb\ndef excommon(arg_1 = 'a.csv', arg_2 = 'b.csv', arg_3 = 'shift-jis'):\n\n print('sys.argv[1]:', arg_1)\n print('sys.argv[2]:', arg_2)\n print('sys.argv[3]:', arg_3)\n\n df_a = pd.read_csv(arg_1, encoding=arg_3, header=None)\n list_a = []\n list_a = list(df_a.loc[0][0])\n\n df_b = pd.read_csv(arg_2, encoding=arg_3, header=None)\n list_b = []\n list_b = list(df_b.loc[0][0])\n\n after_content = \"\"\n after_content2 = \"\"\n flag_last = \"0\"\n def duplicate_delete_csv(content, content2, after_content, after_content2, flag_last):\n after_content = content\n after_content2 = content2\n for i in range(len(content)):\n if i > int(len(content2)-1):\n after_content = content[:i]\n flag_last = \"1\"\n return after_content, after_content2, flag_last\n if len(content) - 1 == i and content[i] == content2[i]:\n flag_last = \"1\"\n content2 = content\n after_content2 = content2\n after_content = content\n return after_content, after_content2, flag_last\n if len(content2) - 1 == i and content[i] == content2[i]:\n flag_last = \"1\"\n content = content2\n after_content = content\n after_content2 = content2\n return after_content, after_content2, flag_last\n if content[i] != content2[i]:\n for num in range(len(content) - i):\n if content2[i] == content[i+num]:\n after_content = content[:i] + content[(i+num):]\n if i == len(content2) - 1:\n flag_last = \"1\"\n after_content = content2[:i+1]\n after_content2 = content2[:i+1]\n return after_content, after_content2, flag_last\n after_content2 = content2[:i] + content2[i+1:]\n if i == len(content2) - 1:\n flag_last = \"1\"\n after_content = content2[:i]\n after_content2 = content2[:i]\n return after_content, after_content2, flag_last\n\n while list_a != list_b:\n list_a, list_b, flag_last = duplicate_delete_csv(list_a, list_b, after_content, after_content2, flag_last) \n if flag_last == \"1\":\n break\n\n StrA = \"\".join(list_a)\n print('Only common parts:', StrA)\n sys.exit\n" ]
[ [ "pandas.read_csv" ] ]
MokriyYuriy/FlexNeuART
[ "49f13e3f9f0b0ea1399ea558436caaedd5233f5c" ]
[ "scripts/py_featextr_server/wordembed_cosine_server.py" ]
[ "#!/usr/bin/env python\nimport sys\nimport argparse\n\nsys.path.append('.')\n\nfrom scripts.py_featextr_server.base_server import BaseQueryHandler, startQueryServer\n\nimport numpy as np\n\nfrom scripts.py_featextr_server.utils import loadEmbeddings, createEmbedMap, robustCosineSimil\n\n# Exclusive==True means that only one getScores\n# function is executed at at time\nclass CosineSimilQueryHandler(BaseQueryHandler):\n def __init__(self, queryEmbedFile, docEmbedFile, exclusive, debugPrint=False, useIDF=True):\n super().__init__(exclusive)\n\n self.debugPrint = debugPrint\n self.useIDF = useIDF\n\n print('Loading answer embeddings from: ' + docEmbedFile)\n answWords, self.answEmbed = loadEmbeddings(docEmbedFile)\n self.answEmbedMap = createEmbedMap(answWords)\n\n if queryEmbedFile is not None:\n print('Loading query embeddings from: ' + queryEmbedFile)\n queryWords, self.queryEmbed = loadEmbeddings(queryEmbedFile)\n self.queryEmbedMap = createEmbedMap(queryWords)\n else:\n self.queryEmbed = self.answEmbed\n self.queryEmbedMap = self.answEmbedMap\n print('Loading is done!')\n\n def textEntryToStr(self, te):\n arr = []\n if self.debugPrint:\n for winfo in te.entries:\n arr.append('%s %g %d ' % (winfo.word, winfo.IDF, winfo.qty))\n return 'docId=' + te.id + ' ' + ' '.join(arr)\n\n def createDocEmbed(self, isQuery, textEntry):\n\n if isQuery:\n embeds = self.queryEmbed\n embedMap = self.queryEmbedMap\n else:\n embeds = self.answEmbed\n embedMap = self.answEmbedMap\n\n zerov = np.zeros_like(embeds[0])\n res = zerov\n\n for winfo in textEntry.entries:\n vectMult = winfo.qty\n if self.useIDF:\n vectMult *= winfo.IDF\n word = winfo.word\n if word in embedMap:\n res += embeds[embedMap[word]] * vectMult\n\n return res\n\n # This function overrides the parent class\n def computeScoresFromParsedOverride(self, query, docs):\n if self.debugPrint:\n print('getScores', query.id, self.textEntryToStr(query))\n ret = {}\n queryEmbed = self.createDocEmbed(True, query)\n if self.debugPrint:\n print(queryEmbed)\n for d in docs:\n if self.debugPrint:\n print(self.textEntryToStr(d))\n docEmbed = self.createDocEmbed(False, d)\n if self.debugPrint:\n print(docEmbed)\n # Regular cosine deals poorly with all-zero vectors\n simil = robustCosineSimil(docEmbed, queryEmbed)\n # simil = (1-cosine(docEmbed, queryEmbed))\n\n # Note that each element must be an array, b/c\n # we can generate more than one feature per document!\n ret[d.id] = [simil]\n\n return ret\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Serving word-embedding models.')\n\n parser.add_argument('--query_embed', metavar='query embeddings',\n default=None, type=str,\n help='Optional query embeddings file')\n\n parser.add_argument('--doc_embed', metavar='doc embeddings',\n required=True, type=str,\n help='document embeddings file')\n\n parser.add_argument('--debug_print', action='store_true',\n help='Provide debug output')\n\n parser.add_argument('--port', metavar='server port',\n required=True, type=int,\n help='Server port')\n\n parser.add_argument('--host', metavar='server host',\n default='127.0.0.1', type=str,\n help='server host addr to bind the port')\n\n args = parser.parse_args()\n\n multiThreaded = True\n startQueryServer(args.host, args.port, multiThreaded,\n CosineSimilQueryHandler(exclusive=False,\n queryEmbedFile=args.query_embed,\n docEmbedFile=args.doc_embed,\n debugPrint=args.debug_print))\n" ]
[ [ "numpy.zeros_like" ] ]
OmerRe/video-processing-methods
[ "245a89aaa1e774a62da1f043058242841a4f53ee" ]
[ "project/Code/video_stabilizer.py" ]
[ "import cv2\nimport numpy as np\nfrom Code.utils import fixBorder, convert_to_gray\n\n\ndef stabilize_video(video_frames: list, config: dict) -> list:\n \"\"\"Creating a stabilized video from an arbitrary input video.\n Args:\n input_video: cv2.VideoCapture. Video we want to stabilize.\n config: dict. Dictionary which contains useful constants.\n Returns:\n None, but creates stabilized video from the input video.\n Details:\n\n \"\"\"\n print(\"Starting Video Stabilization...\")\n transforms = find_motion_between_frames(config['video_params'], video_frames, config)\n transforms_smooth = calc_smooth_transforms(config, transforms)\n stabilized_frames = apply_smooth_motion_to_frames(config['video_params'], video_frames, transforms_smooth)\n print(\"Video Stabilization Finished\")\n\n return stabilized_frames\n\n\ndef find_motion_between_frames(video_params: dict, video_frames: list, config: dict) -> np.ndarray:\n # Pre-define transformation-store array\n transforms = np.zeros((video_params['n_frames'] - 1, 9), np.float32)\n prev_frame_gray = cv2.cvtColor(video_frames[0], cv2.COLOR_BGR2GRAY)\n\n for frame_idx, current_frame in enumerate(video_frames[1:]):\n # Detecting feature points in previous frame\n prev_frame_pts = []\n curr_frame_pts = []\n current_frame_gray = convert_to_gray(current_frame)\n # Calculating optical flow and keeping only the valid features points\n detector = cv2.FastFeatureDetector.create()\n orb = cv2.ORB_create()\n kp1 = detector.detect(prev_frame_gray, None)\n kp2 = detector.detect(current_frame_gray, None)\n kp1, des1 = orb.compute(prev_frame_gray, kp1)\n kp2, des2 = orb.compute(current_frame_gray, kp2)\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n # img3 = cv2.drawMatches(prev_frame_gray, kp1, current_frame_gray, kp2, matches, None,\n # flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n # plt.imshow(img3), plt.show()\n\n prev_frame_pts.append(np.float32([kp1[match.queryIdx].pt for match in matches]).reshape(-1, 1, 2))\n curr_frame_pts.append(np.float32([kp2[match.trainIdx].pt for match in matches]).reshape(-1, 1, 2))\n prev_frame_pts = np.squeeze(np.array(prev_frame_pts))\n curr_frame_pts = np.squeeze(np.array(curr_frame_pts))\n\n transform_matrix, mask = cv2.findHomography(prev_frame_pts, curr_frame_pts, cv2.RANSAC, 5.0)\n transforms[frame_idx] = transform_matrix.flatten()\n\n print(f\"Video Stabilizing: calculating transformation for frame: {frame_idx + 1} \"\n f\"/ {video_params['n_frames'] - 1} - Tracked points: {len(prev_frame_pts)}\")\n\n prev_frame_gray = current_frame_gray\n return transforms\n\n\ndef apply_smooth_motion_to_frames(video_params: dict, video_frames: list, transforms_smooth: np.ndarray) -> list:\n stabilized_frames = [fixBorder(video_frames[0])]\n # Write n_frames-1 transformed frames\n for frame_idx, current_frame in enumerate(video_frames[:-1]):\n print(f\"Video Stabilizing: applying transformation to frame: {frame_idx + 1} \"\n f\"/ {video_params['n_frames'] - 1}\")\n transform_matrix = transforms_smooth[frame_idx].reshape((3, 3))\n # Apply homography wrapping to the given frame\n frame_stabilized = cv2.warpPerspective(current_frame, transform_matrix, (video_params['w'], video_params['h']))\n # Fix border artifacts\n frame_stabilized = fixBorder(frame_stabilized)\n stabilized_frames.append(frame_stabilized)\n return stabilized_frames\n\n\ndef movingAverage(curve: np.ndarray, radius: int) -> np.ndarray:\n window_size = 2 * radius + 1\n # 
Define the filter\n f = np.ones(window_size)/window_size\n # Add padding to the boundaries\n curve_pad = np.lib.pad(curve, (radius, radius), 'edge')\n # Apply convolution\n curve_smoothed = np.convolve(curve_pad, f, mode='same')\n # Remove padding\n curve_smoothed = curve_smoothed[radius:-radius]\n # return smoothed curve\n return curve_smoothed\n\n\ndef smooth(trajectory: np.ndarray, config: dict) -> np.ndarray:\n smoothed_trajectory = np.copy(trajectory)\n for i in range(smoothed_trajectory.shape[1]):\n smoothed_trajectory[:, i] = movingAverage(trajectory[:, i], radius=config['SMOOTHING_RADIUS'])\n return smoothed_trajectory\n\n\ndef calc_smooth_transforms(config: dict, transforms: np.ndarray) -> np.ndarray:\n # Compute trajectory using cumulative sum of transformations\n trajectory = np.cumsum(transforms, axis=0)\n smoothed_trajectory = smooth(trajectory, config)\n # Calculate difference between smoothed_trajectory and trajectory\n difference = smoothed_trajectory - trajectory\n # Calculate smooth transformation array\n transforms_smooth = transforms + difference\n return transforms_smooth\n" ]
[ [ "numpy.lib.pad", "numpy.convolve", "numpy.cumsum", "numpy.ones", "numpy.copy", "numpy.float32", "numpy.array", "numpy.zeros" ] ]
suunni/sp17-i524
[ "42dd11b914c03c741dad8a8505c3e091dc6ec412" ]
[ "project/S17-IO-3012/code/bin/benchmark_replicas_import.py" ]
[ "import matplotlib.pyplot as plt\nimport sys\nimport pandas as pd\n\n\ndef get_parm():\n \"\"\"retrieves mandatory parameter to program\n\n @param: none\n @type: n/a\n\n \"\"\"\n try:\n return sys.argv[1]\n except:\n print ('Must enter file name as parameter')\n exit()\n\n\ndef read_file(filename):\n \"\"\"reads a file into a pandas dataframe\n\n @param: filename The name of the file to read\n @type: string\n\n \"\"\"\n try:\n return pd.read_csv(filename)\n except:\n print ('Error retrieving file')\n exit()\n\n\ndef select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):\n benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]\n benchmark_df = benchmark_df[benchmark_df.test_size == \"large\"]\n\n if cloud != 'X':\n benchmark_df = benchmark_df[benchmark_df.cloud == cloud]\n\n if config_replicas != 'X':\n benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]\n\n if mongos_instances != 'X':\n benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]\n\n if shard_replicas != 'X':\n benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]\n\n if shards_per_replica != 'X':\n benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]\n\n # benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()\n\n # http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe\n benchmark_df = benchmark_df.groupby(\n ['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()\n # http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe\n\n # print benchmark_df1['shard_replicas']\n # print benchmark_df1\n # print benchmark_df\n\n benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)\n\n return benchmark_df\n\n\ndef make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream):\n \"\"\"formats and creates a line chart\n\n @param1: import_seconds_kilo Array with import_seconds from kilo\n @type: numpy array\n @param2: replicas_kilo Array with replicas from kilo\n @type: numpy array\n @param3: import_seconds_chameleon Array with import_seconds from chameleon\n @type: numpy array\n @param4: replicas_chameleon Array with replicas from chameleon\n @type: numpy array\n \"\"\"\n fig = plt.figure()\n #plt.title('Average Mongoimport Runtime by Shard Replication Factor')\n plt.ylabel('Runtime in Seconds')\n plt.xlabel('Degree of Replication Per Set')\n\n # Make the chart\n plt.plot(replicas_kilo, import_seconds_kilo, label='Kilo Cloud')\n plt.plot(replicas_chameleon, import_seconds_chameleon, label='Chameleon Cloud')\n plt.plot(replicas_jetstream, import_seconds_jetstream, label='Jetstream Cloud')\n\n # http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib\n plt.ylim(ymin=0)\n\n plt.legend(loc='best')\n\n # Show the chart (for testing)\n # plt.show()\n # Save the chart\n fig.savefig('../report/replica_import.png')\n\n\n# Run the program by calling the functions\nif __name__ == \"__main__\":\n filename = get_parm()\n benchmark_df = read_file(filename)\n\n cloud = 'kilo'\n config_replicas = 1\n mongos_instances = 1\n shard_replicas = 1\n shards_per_replica = 'X'\n select_df = select_data(benchmark_df, cloud, config_replicas, 
mongos_instances, shard_replicas, shards_per_replica)\n # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror\n # percentage death=\\\n import_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[6]])\n replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])\n # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror\n\n cloud = 'chameleon'\n config_replicas = 1\n mongos_instances = 1\n shard_replicas = 1\n shards_per_replica = 'X'\n select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)\n # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror\n # percentage death=\\\n import_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[6]])\n replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])\n # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror\n\n cloud = 'jetstream'\n config_replicas = 1\n mongos_instances = 1\n shard_replicas = 1\n shards_per_replica = 'X'\n select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)\n # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror\n # percentage death=\\\n import_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[6]])\n replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])\n # http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror\n\n make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream)\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.ylim", "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
RichardScottOZ/harmonica
[ "ccb0437ea0ed528cfd144844edab98141c8d08da" ]
[ "harmonica/equivalent_layer/harmonic_spherical.py" ]
[ "\"\"\"\nEquivalent layer for generic harmonic functions in spherical coordinates\n\"\"\"\nimport numpy as np\nfrom numba import jit\nfrom sklearn.utils.validation import check_is_fitted\nimport verde as vd\nimport verde.base as vdb\n\nfrom .utils import jacobian_numba, predict_numba, pop_extra_coords\nfrom ..forward.utils import distance_spherical\n\n\nclass EQLHarmonicSpherical(vdb.BaseGridder):\n r\"\"\"\n Equivalent-layer for generic harmonic functions in spherical coordinates\n\n This equivalent layer can be used for:\n\n * Spherical coordinates (geographic coordinates must be converted before\n use)\n * Regional or global data where Earth's curvature must be taken into\n account\n * Gravity and magnetic data (including derivatives)\n * Single data types\n * Interpolation\n * Upward continuation\n * Finite-difference based derivative calculations\n\n It cannot be used for:\n\n * Joint inversion of multiple data types (e.g., gravity + gravity\n gradients)\n * Reduction to the pole of magnetic total field anomaly data\n * Analytical derivative calculations\n\n Point sources are located beneath the observed potential-field measurement\n points by default [Cooper2000]_. Custom source locations can be used by\n specifying the *points* argument. Coefficients associated with each point\n source are estimated through linear least-squares with damping (Tikhonov\n 0th order) regularization.\n\n The Green's function for point mass effects used is the inverse Euclidean\n distance between the grid coordinates and the point source:\n\n .. math::\n\n \\phi(\\bar{x}, \\bar{x}') = \\frac{1}{||\\bar{x} - \\bar{x}'||}\n\n where :math:`\\bar{x}` and :math:`\\bar{x}'` are the coordinate vectors of\n the observation point and the source, respectively.\n\n Parameters\n ----------\n damping : None or float\n The positive damping regularization parameter. Controls how much\n smoothness is imposed on the estimated coefficients.\n If None, no regularization is used.\n points : None or list of arrays (optional)\n List containing the coordinates of the point sources used as the\n equivalent layer. Coordinates are assumed to be in the following order:\n (``longitude``, ``latitude``, ``radius``). Both ``longitude`` and\n ``latitude`` must be in degrees and ``radius`` in meters.\n If None, will place one point source bellow each observation point at\n a fixed relative depth bellow the observation point [Cooper2000]_.\n Defaults to None.\n relative_depth : float\n Relative depth at which the point sources are placed beneath the\n observation points. Each source point will be set beneath each data\n point at a depth calculated as the radius of the data point minus\n this constant *relative_depth*. Use positive numbers (negative numbers\n would mean point sources are above the data points). Ignored if\n *points* is specified.\n\n Attributes\n ----------\n points_ : 2d-array\n Coordinates of the point sources used to build the equivalent layer.\n coefs_ : array\n Estimated coefficients of every point source.\n region_ : tuple\n The boundaries (``[W, E, S, N]``) of the data used to fit the\n interpolator. 
Used as the default region for the\n :meth:`~harmonica.EQLHarmonicSpherical.grid` method.\n \"\"\"\n\n # Set the default dimension names for generated outputs\n # as xr.Dataset.\n dims = (\"spherical_latitude\", \"longitude\")\n\n # Overwrite the defalt name for the upward coordinate.\n extra_coords_name = \"radius\"\n\n def __init__(\n self,\n damping=None,\n points=None,\n relative_depth=500,\n ):\n self.damping = damping\n self.points = points\n self.relative_depth = relative_depth\n # Define Green's function for spherical coordinates\n self.greens_function = greens_func_spherical\n\n def fit(self, coordinates, data, weights=None):\n \"\"\"\n Fit the coefficients of the equivalent layer.\n\n The data region is captured and used as default for the\n :meth:`~harmonica.EQLHarmonicSpherical.grid` method.\n\n All input arrays must have the same shape.\n\n Parameters\n ----------\n coordinates : tuple of arrays\n Arrays with the coordinates of each data point. Should be in the\n following order: (``longitude``, ``latitude``, ``radius``, ...).\n Only ``longitude``, ``latitude``, and ``radius`` will be used, all\n subsequent coordinates will be ignored.\n data : array\n The data values of each data point.\n weights : None or array\n If not None, then the weights assigned to each data point.\n Typically, this should be 1 over the data uncertainty squared.\n\n Returns\n -------\n self\n Returns this estimator instance for chaining operations.\n \"\"\"\n coordinates, data, weights = vdb.check_fit_input(coordinates, data, weights)\n # Capture the data region to use as a default when gridding.\n self.region_ = vd.get_region(coordinates[:2])\n coordinates = vdb.n_1d_arrays(coordinates, 3)\n if self.points is None:\n self.points_ = (\n coordinates[0],\n coordinates[1],\n coordinates[2] - self.relative_depth,\n )\n else:\n self.points_ = vdb.n_1d_arrays(self.points, 3)\n jacobian = self.jacobian(coordinates, self.points_)\n self.coefs_ = vdb.least_squares(jacobian, data, weights, self.damping)\n return self\n\n def predict(self, coordinates):\n \"\"\"\n Evaluate the estimated equivalent layer on the given set of points.\n\n Requires a fitted estimator\n (see :meth:`~harmonica.EQLHarmonicSpherical.fit`).\n\n Parameters\n ----------\n coordinates : tuple of arrays\n Arrays with the coordinates of each data point. Should be in the\n following order: (``longitude``, ``latitude``, ``radius``, ...).\n Only ``longitude``, ``latitude`` and ``radius`` will be used, all\n subsequent coordinates will be ignored.\n\n Returns\n -------\n data : array\n The data values evaluated on the given points.\n \"\"\"\n # We know the gridder has been fitted if it has the coefs_\n check_is_fitted(self, [\"coefs_\"])\n shape = np.broadcast(*coordinates[:3]).shape\n size = np.broadcast(*coordinates[:3]).size\n dtype = coordinates[0].dtype\n coordinates = tuple(np.atleast_1d(i).ravel() for i in coordinates[:3])\n data = np.zeros(size, dtype=dtype)\n predict_numba(\n coordinates, self.points_, self.coefs_, data, self.greens_function\n )\n return data.reshape(shape)\n\n def jacobian(\n self, coordinates, points, dtype=\"float64\"\n ): # pylint: disable=no-self-use\n \"\"\"\n Make the Jacobian matrix for the equivalent layer.\n\n Each column of the Jacobian is the Green's function for a single point\n source evaluated on all observation points.\n\n Parameters\n ----------\n coordinates : tuple of arrays\n Arrays with the coordinates of each data point. 
Should be in the\n following order: (``longitude``, ``latitude``, ``radius``, ...).\n Only ``longitude``, ``latitude`` and ``radius`` will be used, all\n subsequent coordinates will be ignored.\n points : tuple of arrays\n Tuple of arrays containing the coordinates of the point sources\n used as equivalent layer in the following order:\n (``longitude``, ``latitude``, ``radius``).\n dtype : str or numpy dtype\n The type of the Jacobian array.\n\n Returns\n -------\n jacobian : 2D array\n The (n_data, n_points) Jacobian matrix.\n \"\"\"\n # Compute Jacobian matrix\n n_data = coordinates[0].size\n n_points = points[0].size\n jac = np.zeros((n_data, n_points), dtype=dtype)\n jacobian_numba(coordinates, points, jac, self.greens_function)\n return jac\n\n def grid(\n self,\n upward,\n region=None,\n shape=None,\n spacing=None,\n dims=None,\n data_names=None,\n **kwargs\n ): # pylint: disable=arguments-differ\n \"\"\"\n Interpolate the data onto a regular grid.\n\n The grid can be specified by either the number of points in each\n dimension (the *shape*) or by the grid node spacing. See\n :func:`verde.grid_coordinates` for details. All grid points will be\n located at the same `upward` coordinate. Other arguments for\n :func:`verde.grid_coordinates` can be passed as extra keyword arguments\n (``kwargs``) to this method.\n\n If the interpolator collected the input data region, then it will be\n used if ``region=None``. Otherwise, you must specify the grid region.\n Use the *dims* and *data_names* arguments to set custom names for the\n dimensions and the data field(s) in the output :class:`xarray.Dataset`.\n Default names will be provided if none are given.\n\n Parameters\n ----------\n upward : float\n Upward coordinate of the grid points.\n region : list = [W, E, S, N]\n The west, east, south, and north boundaries of a given region.\n shape : tuple = (n_north, n_east) or None\n The number of points in the South-North and West-East directions,\n respectively.\n spacing : tuple = (s_north, s_east) or None\n The grid spacing in the South-North and West-East directions,\n respectively.\n dims : list or None\n The names of the northing and easting data dimensions,\n respectively, in the output grid. Default is determined from the\n ``dims`` attribute of the class. Must be defined in the following\n order: northing dimension, easting dimension.\n **NOTE: This is an exception to the \"easting\" then\n \"northing\" pattern but is required for compatibility with xarray.**\n data_names : list of None\n The name(s) of the data variables in the output grid. Defaults to\n ``['scalars']``.\n\n Returns\n -------\n grid : xarray.Dataset\n The interpolated grid. Metadata about the interpolator is written\n to the ``attrs`` attribute.\n\n \"\"\"\n # We override the grid method from BaseGridder so it takes the upward\n # coordinate as a positional argument. We disable pylint\n # arguments-differ error because we intend to make this method\n # different from the inherited one.\n\n # Ignore extra_coords if passed\n pop_extra_coords(kwargs)\n # Grid data\n # We always pass projection=None because that argument it's intended to\n # be used only with Cartesian gridders.\n grid = super().grid(\n region=region,\n shape=shape,\n spacing=spacing,\n dims=dims,\n data_names=data_names,\n projection=None,\n extra_coords=upward,\n **kwargs,\n )\n return grid\n\n def scatter(\n self,\n region=None,\n size=None,\n random_state=None,\n dims=None,\n data_names=None,\n projection=None,\n **kwargs\n ):\n \"\"\"\n .. 
warning ::\n\n Not implemented method. The scatter method will be deprecated on\n Verde v2.0.0.\n\n \"\"\"\n raise NotImplementedError\n\n def profile(\n self,\n point1,\n point2,\n size,\n dims=None,\n data_names=None,\n projection=None,\n **kwargs\n ):\n \"\"\"\n .. warning ::\n\n Not implemented method. The profile on spherical coordinates should\n be done using great-circle distances through the Haversine formula.\n\n \"\"\"\n raise NotImplementedError\n\n\n@jit(nopython=True)\ndef greens_func_spherical(\n longitude, latitude, radius, point_longitude, point_latitude, point_radius\n):\n \"\"\"\n Green's function for the equivalent layer in spherical coordinates\n\n Uses Numba to speed up things.\n \"\"\"\n distance = distance_spherical(\n (longitude, latitude, radius), (point_longitude, point_latitude, point_radius)\n )\n return 1 / distance\n" ]
[ [ "numpy.atleast_1d", "numpy.broadcast", "sklearn.utils.validation.check_is_fitted", "numpy.zeros" ] ]
salihmarangoz/StereoDepthEstimation
[ "a068df34329ee0642b5eb4277dedcd7012d78b4d" ]
[ "opencv_disparity/test.py" ]
[ "##################################################################################\n# SOURCE: https://github.com/aliyasineser/stereoDepth/blob/master/stereo_depth.py\n##################################################################################\n\nimport numpy as np\nimport cv2 as cv\nimport cv2\nfrom matplotlib import pyplot as plt\n\ndef depth_map(imgL, imgR):\n \"\"\" Depth map calculation. Works with SGBM and WLS. Need rectified images, returns depth map ( left to right disparity ) \"\"\"\n # SGBM Parameters -----------------\n window_size = 3 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n\n left_matcher = cv2.StereoSGBM_create(\n minDisparity=0,\n numDisparities=12*16, # max_disp has to be dividable by 16 f. E. HH 192, 256\n blockSize=window_size,\n P1=8 * 5 * window_size,\n # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n P2=32 * 5 * window_size,\n disp12MaxDiff=12,\n uniquenessRatio=10,\n speckleWindowSize=50,\n speckleRange=32,\n preFilterCap=63,\n mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY\n )\n right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)\n # FILTER Parameters\n lmbda = 80000\n sigma = 1.3\n visual_multiplier = 6\n\n wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)\n wls_filter.setLambda(lmbda)\n\n wls_filter.setSigmaColor(sigma)\n displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16\n dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16\n displ = np.int16(displ)\n dispr = np.int16(dispr)\n filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put \"imgL\" here!!!\n\n filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX);\n filteredImg = np.uint8(filteredImg)\n\n return filteredImg\n\n\nimgL = cv.imread('l.png',0)\nimgR = cv.imread('r.png',0)\ndisparity = depth_map(imgL, imgR)\nplt.imshow(disparity,'gray')\nplt.show()" ]
[ [ "numpy.uint8", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.int16" ] ]
balewski/neuron_inverter_benchmark
[ "4ad8a03c07e174728ccea2bc5f24d1ae620966a8" ]
[ "poptorch/toolbox/Dataloader_h5.py" ]
[ "__author__ = \"Jan Balewski\"\n__email__ = \"[email protected]\"\n\n'''\nthis data loader reads all data upon start, there is no distributed sampler\n\nreads all data at once and serves them from RAM\n- optimized for mult-GPU training\n- only used block of data from each H5-file\n- reads data from common file for all ranks\n- allows for in-fly transformation\n\nShuffle: only all samples after read is compleated\n\n'''\n\nimport time, os\nimport random\nimport h5py\nimport numpy as np\nfrom pprint import pprint\n\nimport copy\nfrom torch.utils.data import Dataset, DataLoader\nimport torch \nimport logging\nimport poptorch\n \n#...!...!..................\ndef get_data_loader(params, inpMD,domain,popopts, verb=1):\n conf=copy.deepcopy(params) # the input is reused later in the upper level code\n #print('\\n\\nGDL:',domain)\n conf['domain']=domain\n conf['h5name']=params['data_path']+inpMD['h5nameTemplate'].replace('*',params['cell_name'])\n if params['num_inp_chan']!=None: #user wants a change\n assert params['num_inp_chan']>0\n assert params['num_inp_chan']<=inpMD['numFeature']\n conf['numInpChan']=params['num_inp_chan']\n else: # just copy the meta-data value\n conf['numInpChan']=inpMD['numFeature']\n\n conf['doAux']=False #legacy switch never used\n #pprint(conf) \n dataset= Dataset_h5_neuronInverter(conf,verb)\n if 'max_samples_per_epoch' in params:\n max_samp= params['max_samples_per_epoch']\n print('GDL: WARN, shorter %s max_samples=%d from %d'%(domain,max_samp,dataset.numLocFrames)) \n dataset.numLocFrames=min(max_samp,dataset.numLocFrames) \n \n #print('bb',len(dataset),dataset.sanity())\n \n # GC-speciffic constraint:\n assert len(dataset)//conf['local_batch_size']//conf['gc_m2000']['replica_steps_per_iter']>0\n\n params[domain+'_steps_per_epoch']=dataset.sanity()\n\n params['model']['inputShape']=list(dataset.data_frames.shape[1:])\n params['model']['outputSize']=dataset.data_parU.shape[1]\n\n #shuffle=domain=='train' # use False only for reproducibility\n shuffle=True # both: train & val\n\n # Graphcore speciffic\n dataloader = poptorch.DataLoader(popopts,dataset,\n batch_size=conf['local_batch_size'],\n num_workers=conf['num_data_workers'],\n shuffle=shuffle,\n persistent_workers=True,\n mode=poptorch.DataLoaderMode.Async,\n async_options={\n \"sharing_strategy\":\n poptorch.SharingStrategy.SharedMemory,\n \"early_preload\": True,\n \"buffer_size\": conf['num_data_workers'],\n \"load_indefinitely\": True,\n \"miss_sleep_time_in_ms\": 0\n },\n auto_distributed_partitioning=False, #to serve all data\n )\n\n dataloader.conf=conf\n #print('cc',len(dataloader))\n return dataloader\n\n\n#-------------------\n#-------------------\n#-------------------\nclass Dataset_h5_neuronInverter(Dataset):\n \n def __init__(self, conf,verb=1):\n self.conf=conf\n self.verb=verb\n\n self.openH5()\n if self.verb and 0:\n print('\\nDS-cnst name=%s shuffle=%r BS=%d steps=%d myRank=%d numSampl/hd5=%d'%(self.conf['name'],self.conf['shuffle'],self.localBS,self.__len__(),self.conf['world_rank'],self.conf['numSamplesPerH5']),'H5-path=',self.conf['dataPath'])\n assert self.numLocFrames>0\n assert self.conf['world_rank']>=0\n\n if self.verb :\n logging.info(' DS:load-end %s locSamp=%d, X.shape: %s type: %s'%(self.conf['domain'],self.numLocFrames,str(self.data_frames.shape),self.data_frames.dtype))\n #print(' DS:Xall',self.data_frames.shape,self.data_frames.dtype)\n #print(' DS:Yall',self.data_parU.shape,self.data_parU.dtype)\n \n\n#...!...!..................\n def sanity(self): \n stepPerEpoch=int(np.floor( 
self.numLocFrames/ self.conf['local_batch_size']))\n if stepPerEpoch <1:\n print('\\nDS:ABORT, Have you requested too few samples per rank?, numLocFrames=%d, BS=%d name=%s'%(self.numLocFrames, localBS,self.conf['name']))\n exit(67)\n # all looks good\n return stepPerEpoch\n \n#...!...!..................\n def openH5(self):\n cf=self.conf\n inpF=cf['h5name']\n inpFeat=cf['numInpChan'] # this is what user wants\n dom=cf['domain']\n if self.verb>0 : logging.info('DS:fileH5 %s rank %d of %d '%(inpF,cf['world_rank'],cf['world_size']))\n \n if not os.path.exists(inpF):\n print('FAILD, missing HD5',inpF)\n exit(22)\n\n startTm0 = time.time()\n\n # = = = READING HD5 start\n h5f = h5py.File(inpF, 'r')\n Xshape=h5f[dom+'_frames'].shape\n totSamp=Xshape[0]\n\n locStep=int(totSamp/cf['world_size']/cf['local_batch_size'])\n locSamp=locStep*cf['local_batch_size']\n #print('totSamp=%d locStep=%d'%(totSamp,locStep))\n assert locStep>0\n maxShard= totSamp// locSamp\n assert maxShard>=cf['world_size']\n \n # chosen shard is rank dependent, wraps up if not sufficient number of ranks\n myShard=self.conf['world_rank'] %maxShard\n sampIdxOff=myShard*locSamp\n \n if self.verb: logging.info('DS:file dom=%s myShard=%d, maxShard=%d, sampIdxOff=%d allXshape=%s inpFeat=%d'%(cf['domain'],myShard,maxShard,sampIdxOff,str(Xshape),inpFeat))\n \n # data reading starts ....\n assert inpFeat<=Xshape[2]\n if inpFeat==Xshape[2]:\n self.data_frames=h5f[dom+'_frames'][sampIdxOff:sampIdxOff+locSamp]#.astype('float32')\n else:\n self.data_frames=h5f[dom+'_frames'][sampIdxOff:sampIdxOff+locSamp,:,:inpFeat]\n self.data_parU=h5f[dom+'_unitStar_par'][sampIdxOff:sampIdxOff+locSamp]#.astype('float32')\n if cf['doAux']: #never used\n self.data_parP=h5f[dom+'_phys_par'][sampIdxOff:sampIdxOff+locSamp]\n\n h5f.close()\n # = = = READING HD5 done\n if self.verb>0 :\n startTm1 = time.time()\n if self.verb: logging.info('DS: hd5 read time=%.2f(sec) dom=%s '%(startTm1 - startTm0,dom))\n \n # .......................................................\n #.... data embeddings, transformation should go here ....\n \n #self.data_parU*=1.2\n #.... end of embeddings ........\n # .......................................................\n\n if 0: # check normalization\n xm=np.mean(self.data_frames)\n xs=np.std(self.data_frames)\n print('xm',xm,xs,myShard,cf['domain'])\n ok99\n \n self.numLocFrames=self.data_frames.shape[0]\n #self.numLocFrames=512*10 # reduce nymber of samples\n\n def __len__(self): \n return self.numLocFrames\n\n\n def __getitem__(self, idx):\n # print('DSI:',idx,self.conf['name'],self.cnt); self.cnt+=1\n assert idx>=0\n assert idx< self.numLocFrames\n X=self.data_frames[idx]\n Y=self.data_parU[idx]\n return (X,Y)\n\n if self.conf['x_y_aux']: # predictions for Roy\n AUX=self.data_parP[pCnt:pCnt+bs]\n return (X,Y,AUX)\n\n" ]
[ [ "numpy.std", "numpy.mean", "numpy.floor" ] ]
timhunderwood/numpy-to-stl
[ "eea305ae30bb4aa5882d7c66edebe76173da8b06" ]
[ "examples/cellular_example.py" ]
[ "import cellular\nimport numpy\nimport mpl_toolkits.mplot3d\nimport matplotlib.pyplot as plt\nimport numpy_to_stl\n\n\ndef get_simulated_world(cells_per_day, rule, number_of_days):\n world = cellular.World(cells_per_day, rule, ones=False)\n world.simulate(number_of_days)\n world.display(landscape=True)\n return numpy.vstack(world.state)\n\n\ndef create_mesh_of_world(\n cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=100\n):\n array = get_simulated_world(cells_per_day, rule, number_of_days)\n return numpy_to_stl.create_surface_mesh_from_array(array, base_height=1)\n\n\ndef plot_stl_world(cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=200):\n world_mesh = create_mesh_of_world(cells_per_day, rule, number_of_days)\n\n figure = plt.figure()\n axes = mpl_toolkits.mplot3d.Axes3D(figure)\n #\n # # Load the STL files and add the vectors to the plot\n axes.add_collection3d(\n mpl_toolkits.mplot3d.art3d.Poly3DCollection(\n world_mesh.vectors, facecolor=\"red\", edgecolor=\"black\"\n )\n )\n\n # Auto scale to the mesh size\n scale = world_mesh.points.flatten(-1)\n axes.auto_scale_xyz(scale, scale, scale)\n\n # Show the plot to the screen\n plt.show()\n world_mesh.save(\"small_cellular_example.stl\")\n\n\nif __name__ == \"__main__\":\n plot_stl_world(cells_per_day=100, number_of_days=200)\n" ]
[ [ "matplotlib.pyplot.show", "numpy.vstack", "matplotlib.pyplot.figure" ] ]
Beta3-Data/FacialLandmark-Live-Training
[ "10b2b464f1deb015a7f152bb14f120f0dc6f9de2" ]
[ "dataset/dataset_test.py" ]
[ "from __future__ import print_function, division\r\nimport os\r\nimport torch\r\nimport pandas as pd\r\nfrom skimage import io, transform\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torchvision import transforms, utils\r\nfrom FaceLandmarksDataset import FaceLandmarksDataset\r\nfrom FaceLandmarksDataset import SmartRandomCrop\r\nfrom FaceLandmarksDataset import Rescale\r\n\r\n# Ignore warnings\r\ndef show_landmarks(image, landmarks):\r\n \"\"\"Show image with landmarks\"\"\"\r\n plt.imshow(image)\r\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\r\n plt.pause(0.001) # pause a bit so that plots are updated\r\nlandmarks_frame = pd.read_csv('face_landmarks.csv')\r\n\r\nn = 65\r\nimg_name = landmarks_frame.ix[n, 0]\r\nlandmarks = landmarks_frame.ix[n, 1:].as_matrix().astype('float')\r\nlandmarks = landmarks.reshape(-1, 2)\r\nmax_xy = np.max(landmarks,axis=0)\r\nmin_xy = np.min(landmarks,axis=0)\r\nprint(max_xy)\r\nprint(min_xy)\r\n\r\nprint('Image name: {}'.format(img_name))\r\nprint('Landmarks shape: {}'.format(landmarks.shape))\r\nprint('First 4 Landmarks: {}'.format(landmarks[:4]))\r\nface_dataset = FaceLandmarksDataset(csv_file='face_landmarks.csv',\r\n root_dir='data/image/')\r\n\r\nfig = plt.figure()\r\ncrop = SmartRandomCrop()\r\nscale = Rescale((256,256))\r\ncomposed = transforms.Compose([SmartRandomCrop(),])\r\n\r\nfor i in range(len(face_dataset)):\r\n sample = face_dataset[i]\r\n sample = crop(sample)\r\n sample = scale(sample)\r\n\r\n print(i, sample['image'].shape, sample['landmarks'].shape)\r\n\r\n ax = plt.subplot(1, 4, i + 1)\r\n plt.tight_layout()\r\n ax.set_title('Sample #{}'.format(i))\r\n ax.axis('off')\r\n show_landmarks(**sample)\r\n\r\n if i == 3:\r\n plt.show()\r\n break\r\n\r\n" ]
[ [ "matplotlib.pyplot.imshow", "pandas.read_csv", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.scatter", "numpy.min", "numpy.max", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.pause", "matplotlib.pyplot.figure" ] ]
Altizon/incubator-superset
[ "e55fe43ca67a29518674a1a2137a3dbd4f166864" ]
[ "superset/views/core.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=C,R,W\nimport logging\nimport re\nfrom contextlib import closing\nfrom datetime import datetime, timedelta\nfrom typing import Any, cast, Dict, List, Optional, Union\nfrom urllib import parse\n\nimport backoff\nimport msgpack\nimport pandas as pd\nimport pyarrow as pa\nimport simplejson as json\nfrom flask import abort, flash, g, Markup, redirect, render_template, request, Response\nfrom flask_appbuilder import expose\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom flask_appbuilder.security.decorators import has_access, has_access_api\nfrom flask_appbuilder.security.sqla import models as ab_models\nfrom flask_babel import gettext as __, lazy_gettext as _\nfrom sqlalchemy import and_, Integer, or_, select\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom sqlalchemy.orm.session import Session\nfrom werkzeug.urls import Href\n\nimport superset.models.core as models\nfrom superset import (\n app,\n appbuilder,\n cache,\n conf,\n dataframe,\n db,\n event_logger,\n get_feature_flags,\n is_feature_enabled,\n result_set,\n results_backend,\n results_backend_use_msgpack,\n security_manager,\n sql_lab,\n talisman,\n viz,\n)\nfrom superset.connectors.connector_registry import ConnectorRegistry\nfrom superset.connectors.sqla.models import AnnotationDatasource\nfrom superset.constants import RouteMethod\nfrom superset.exceptions import (\n DatabaseNotFound,\n SupersetException,\n SupersetSecurityException,\n SupersetTimeoutException,\n)\nfrom superset.jinja_context import get_template_processor\nfrom superset.models.dashboard import Dashboard\nfrom superset.models.datasource_access_request import DatasourceAccessRequest\nfrom superset.models.slice import Slice\nfrom superset.models.sql_lab import Query, TabState\nfrom superset.models.user_attributes import UserAttribute\nfrom superset.sql_parse import ParsedQuery\nfrom superset.sql_validators import get_validator_by_name\nfrom superset.utils import core as utils, dashboard_import_export\nfrom superset.utils.dates import now_as_float\nfrom superset.utils.decorators import etag_cache, stats_timing\nfrom superset.views.database.filters import DatabaseFilter\n\nfrom .base import (\n api,\n BaseSupersetView,\n check_ownership,\n common_bootstrap_payload,\n CsvResponse,\n data_payload_response,\n DeleteMixin,\n generate_download_headers,\n get_error_msg,\n get_user_roles,\n handle_api_exception,\n json_error_response,\n json_success,\n SupersetModelView,\n)\nfrom .utils import (\n apply_display_max_row_limit,\n bootstrap_user_data,\n get_datasource_info,\n get_form_data,\n get_viz,\n)\n\nconfig = app.config\nCACHE_DEFAULT_TIMEOUT = config[\"CACHE_DEFAULT_TIMEOUT\"]\nSQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = 
config[\"SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT\"]\nstats_logger = config[\"STATS_LOGGER\"]\nDAR = DatasourceAccessRequest\nQueryStatus = utils.QueryStatus\nlogger = logging.getLogger(__name__)\n\nDATABASE_KEYS = [\n \"allow_csv_upload\",\n \"allow_ctas\",\n \"allow_dml\",\n \"allow_multi_schema_metadata_fetch\",\n \"allow_run_async\",\n \"allows_subquery\",\n \"backend\",\n \"database_name\",\n \"expose_in_sqllab\",\n \"force_ctas_schema\",\n \"id\",\n]\n\n\nALL_DATASOURCE_ACCESS_ERR = __(\n \"This endpoint requires the `all_datasource_access` permission\"\n)\nDATASOURCE_MISSING_ERR = __(\"The data source seems to have been deleted\")\nACCESS_REQUEST_MISSING_ERR = __(\"The access requests seem to have been deleted\")\nUSER_MISSING_ERR = __(\"The user seems to have been deleted\")\n\nFORM_DATA_KEY_BLACKLIST: List[str] = []\nif not config[\"ENABLE_JAVASCRIPT_CONTROLS\"]:\n FORM_DATA_KEY_BLACKLIST = [\"js_tooltip\", \"js_onclick_href\", \"js_data_mutator\"]\n\n\ndef get_database_access_error_msg(database_name):\n return __(\n \"This view requires the database %(name)s or \"\n \"`all_datasource_access` permission\",\n name=database_name,\n )\n\n\ndef is_owner(obj, user):\n \"\"\" Check if user is owner of the slice \"\"\"\n return obj and user in obj.owners\n\n\ndef check_datasource_perms(\n self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None\n) -> None:\n \"\"\"\n Check if user can access a cached response from explore_json.\n\n This function takes `self` since it must have the same signature as the\n the decorated method.\n\n :param datasource_type: The datasource type, i.e., 'druid' or 'table'\n :param datasource_id: The datasource ID\n :raises SupersetSecurityException: If the user cannot access the resource\n \"\"\"\n\n form_data = get_form_data()[0]\n\n try:\n datasource_id, datasource_type = get_datasource_info(\n datasource_id, datasource_type, form_data\n )\n except SupersetException as e:\n raise SupersetSecurityException(str(e))\n\n viz_obj = get_viz(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n force=False,\n )\n\n security_manager.assert_viz_permission(viz_obj)\n\n\ndef check_slice_perms(self, slice_id):\n \"\"\"\n Check if user can access a cached response from slice_json.\n\n This function takes `self` since it must have the same signature as the\n the decorated method.\n \"\"\"\n\n form_data, slc = get_form_data(slice_id, use_slice_data=True)\n\n viz_obj = get_viz(\n datasource_type=slc.datasource.type,\n datasource_id=slc.datasource.id,\n form_data=form_data,\n force=False,\n )\n\n security_manager.assert_viz_permission(viz_obj)\n\n\ndef _deserialize_results_payload(\n payload: Union[bytes, str], query, use_msgpack: Optional[bool] = False\n) -> dict:\n logger.debug(f\"Deserializing from msgpack: {use_msgpack}\")\n if use_msgpack:\n with stats_timing(\n \"sqllab.query.results_backend_msgpack_deserialize\", stats_logger\n ):\n ds_payload = msgpack.loads(payload, raw=False)\n\n with stats_timing(\"sqllab.query.results_backend_pa_deserialize\", stats_logger):\n pa_table = pa.deserialize(ds_payload[\"data\"])\n\n df = result_set.SupersetResultSet.convert_table_to_df(pa_table)\n ds_payload[\"data\"] = dataframe.df_to_records(df) or []\n\n db_engine_spec = query.database.db_engine_spec\n all_columns, data, expanded_columns = db_engine_spec.expand_data(\n ds_payload[\"selected_columns\"], ds_payload[\"data\"]\n )\n ds_payload.update(\n {\"data\": data, \"columns\": all_columns, \"expanded_columns\": 
expanded_columns}\n )\n\n return ds_payload\n else:\n with stats_timing(\n \"sqllab.query.results_backend_json_deserialize\", stats_logger\n ):\n return json.loads(payload) # type: ignore\n\n\nclass AccessRequestsModelView(SupersetModelView, DeleteMixin):\n datamodel = SQLAInterface(DAR)\n include_route_methods = RouteMethod.CRUD_SET\n list_columns = [\n \"username\",\n \"user_roles\",\n \"datasource_link\",\n \"roles_with_datasource\",\n \"created_on\",\n ]\n order_columns = [\"created_on\"]\n base_order = (\"changed_on\", \"desc\")\n label_columns = {\n \"username\": _(\"User\"),\n \"user_roles\": _(\"User Roles\"),\n \"database\": _(\"Database URL\"),\n \"datasource_link\": _(\"Datasource\"),\n \"roles_with_datasource\": _(\"Roles to grant\"),\n \"created_on\": _(\"Created On\"),\n }\n\n\n@talisman(force_https=False)\[email protected](\"/health\")\ndef health():\n return \"OK\"\n\n\n@talisman(force_https=False)\[email protected](\"/healthcheck\")\ndef healthcheck():\n return \"OK\"\n\n\n@talisman(force_https=False)\[email protected](\"/ping\")\ndef ping():\n return \"OK\"\n\n\nclass KV(BaseSupersetView):\n\n \"\"\"Used for storing and retrieving key value pairs\"\"\"\n\n @event_logger.log_this\n @has_access_api\n @expose(\"/store/\", methods=[\"POST\"])\n def store(self):\n try:\n value = request.form.get(\"data\")\n obj = models.KeyValue(value=value)\n db.session.add(obj)\n db.session.commit()\n except Exception as e:\n return json_error_response(e)\n return Response(json.dumps({\"id\": obj.id}), status=200)\n\n @event_logger.log_this\n @has_access_api\n @expose(\"/<key_id>/\", methods=[\"GET\"])\n def get_value(self, key_id):\n try:\n kv = db.session.query(models.KeyValue).filter_by(id=key_id).scalar()\n if not kv:\n return Response(status=404, content_type=\"text/plain\")\n except Exception as e:\n return json_error_response(e)\n return Response(kv.value, status=200, content_type=\"text/plain\")\n\n\nclass R(BaseSupersetView):\n\n \"\"\"used for short urls\"\"\"\n\n @event_logger.log_this\n @expose(\"/<url_id>\")\n def index(self, url_id):\n url = db.session.query(models.Url).get(url_id)\n if url and url.url:\n explore_url = \"//superset/explore/?\"\n if url.url.startswith(explore_url):\n explore_url += f\"r={url_id}\"\n return redirect(explore_url[1:])\n else:\n return redirect(url.url[1:])\n else:\n flash(\"URL to nowhere...\", \"danger\")\n return redirect(\"/\")\n\n @event_logger.log_this\n @has_access_api\n @expose(\"/shortner/\", methods=[\"POST\"])\n def shortner(self):\n url = request.form.get(\"data\")\n obj = models.Url(url=url)\n db.session.add(obj)\n db.session.commit()\n return Response(\n \"{scheme}://{request.headers[Host]}/r/{obj.id}\".format(\n scheme=request.scheme, request=request, obj=obj\n ),\n mimetype=\"text/plain\",\n )\n\n\nclass Superset(BaseSupersetView):\n \"\"\"The base views for Superset!\"\"\"\n\n logger = logging.getLogger(__name__)\n\n @has_access_api\n @expose(\"/datasources/\")\n def datasources(self):\n datasources = ConnectorRegistry.get_all_datasources(db.session)\n datasources = [o.short_data for o in datasources if o.short_data.get(\"name\")]\n datasources = sorted(datasources, key=lambda o: o[\"name\"])\n return self.json_response(datasources)\n\n @has_access_api\n @expose(\"/override_role_permissions/\", methods=[\"POST\"])\n def override_role_permissions(self):\n \"\"\"Updates the role with the give datasource permissions.\n\n Permissions not in the request will be revoked. This endpoint should\n be available to admins only. 
Expects JSON in the format:\n {\n 'role_name': '{role_name}',\n 'database': [{\n 'datasource_type': '{table|druid}',\n 'name': '{database_name}',\n 'schema': [{\n 'name': '{schema_name}',\n 'datasources': ['{datasource name}, {datasource name}']\n }]\n }]\n }\n \"\"\"\n data = request.get_json(force=True)\n role_name = data[\"role_name\"]\n databases = data[\"database\"]\n\n db_ds_names = set()\n for dbs in databases:\n for schema in dbs[\"schema\"]:\n for ds_name in schema[\"datasources\"]:\n fullname = utils.get_datasource_full_name(\n dbs[\"name\"], ds_name, schema=schema[\"name\"]\n )\n db_ds_names.add(fullname)\n\n existing_datasources = ConnectorRegistry.get_all_datasources(db.session)\n datasources = [d for d in existing_datasources if d.full_name in db_ds_names]\n role = security_manager.find_role(role_name)\n # remove all permissions\n role.permissions = []\n # grant permissions to the list of datasources\n granted_perms = []\n for datasource in datasources:\n view_menu_perm = security_manager.find_permission_view_menu(\n view_menu_name=datasource.perm, permission_name=\"datasource_access\"\n )\n # prevent creating empty permissions\n if view_menu_perm and view_menu_perm.view_menu:\n role.permissions.append(view_menu_perm)\n granted_perms.append(view_menu_perm.view_menu.name)\n db.session.commit()\n return self.json_response(\n {\"granted\": granted_perms, \"requested\": list(db_ds_names)}, status=201\n )\n\n @event_logger.log_this\n @has_access\n @expose(\"/request_access/\")\n def request_access(self):\n datasources = set()\n dashboard_id = request.args.get(\"dashboard_id\")\n if dashboard_id:\n dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one()\n datasources |= dash.datasources\n datasource_id = request.args.get(\"datasource_id\")\n datasource_type = request.args.get(\"datasource_type\")\n if datasource_id:\n ds_class = ConnectorRegistry.sources.get(datasource_type)\n datasource = (\n db.session.query(ds_class).filter_by(id=int(datasource_id)).one()\n )\n datasources.add(datasource)\n\n has_access = all(\n (\n datasource and security_manager.datasource_access(datasource)\n for datasource in datasources\n )\n )\n if has_access:\n return redirect(\"/superset/dashboard/{}\".format(dashboard_id))\n\n if request.args.get(\"action\") == \"go\":\n for datasource in datasources:\n access_request = DAR(\n datasource_id=datasource.id, datasource_type=datasource.type\n )\n db.session.add(access_request)\n db.session.commit()\n flash(__(\"Access was requested\"), \"info\")\n return redirect(\"/\")\n\n return self.render_template(\n \"superset/request_access.html\",\n datasources=datasources,\n datasource_names=\", \".join([o.name for o in datasources]),\n )\n\n @event_logger.log_this\n @has_access\n @expose(\"/approve\")\n def approve(self):\n def clean_fulfilled_requests(session):\n for r in session.query(DAR).all():\n datasource = ConnectorRegistry.get_datasource(\n r.datasource_type, r.datasource_id, session\n )\n if not datasource or security_manager.datasource_access(datasource):\n # datasource does not exist anymore\n session.delete(r)\n session.commit()\n\n datasource_type = request.args.get(\"datasource_type\")\n datasource_id = request.args.get(\"datasource_id\")\n created_by_username = request.args.get(\"created_by\")\n role_to_grant = request.args.get(\"role_to_grant\")\n role_to_extend = request.args.get(\"role_to_extend\")\n\n session = db.session\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, session\n )\n\n if not 
datasource:\n flash(DATASOURCE_MISSING_ERR, \"alert\")\n return json_error_response(DATASOURCE_MISSING_ERR)\n\n requested_by = security_manager.find_user(username=created_by_username)\n if not requested_by:\n flash(USER_MISSING_ERR, \"alert\")\n return json_error_response(USER_MISSING_ERR)\n\n requests = (\n session.query(DAR)\n .filter(\n DAR.datasource_id == datasource_id,\n DAR.datasource_type == datasource_type,\n DAR.created_by_fk == requested_by.id,\n )\n .all()\n )\n\n if not requests:\n flash(ACCESS_REQUEST_MISSING_ERR, \"alert\")\n return json_error_response(ACCESS_REQUEST_MISSING_ERR)\n\n # check if you can approve\n if security_manager.all_datasource_access() or check_ownership(\n datasource, raise_if_false=False\n ):\n # can by done by admin only\n if role_to_grant:\n role = security_manager.find_role(role_to_grant)\n requested_by.roles.append(role)\n msg = __(\n \"%(user)s was granted the role %(role)s that gives access \"\n \"to the %(datasource)s\",\n user=requested_by.username,\n role=role_to_grant,\n datasource=datasource.full_name,\n )\n utils.notify_user_about_perm_udate(\n g.user,\n requested_by,\n role,\n datasource,\n \"email/role_granted.txt\",\n app.config,\n )\n flash(msg, \"info\")\n\n if role_to_extend:\n perm_view = security_manager.find_permission_view_menu(\n \"email/datasource_access\", datasource.perm\n )\n role = security_manager.find_role(role_to_extend)\n security_manager.add_permission_role(role, perm_view)\n msg = __(\n \"Role %(r)s was extended to provide the access to \"\n \"the datasource %(ds)s\",\n r=role_to_extend,\n ds=datasource.full_name,\n )\n utils.notify_user_about_perm_udate(\n g.user,\n requested_by,\n role,\n datasource,\n \"email/role_extended.txt\",\n app.config,\n )\n flash(msg, \"info\")\n clean_fulfilled_requests(session)\n else:\n flash(__(\"You have no permission to approve this request\"), \"danger\")\n return redirect(\"/accessrequestsmodelview/list/\")\n for r in requests:\n session.delete(r)\n session.commit()\n return redirect(\"/accessrequestsmodelview/list/\")\n\n def get_viz(\n self,\n slice_id=None,\n form_data=None,\n datasource_type=None,\n datasource_id=None,\n force=False,\n ):\n if slice_id:\n slc = db.session.query(Slice).filter_by(id=slice_id).one()\n return slc.get_viz()\n else:\n viz_type = form_data.get(\"viz_type\", \"table\")\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session\n )\n viz_obj = viz.viz_types[viz_type](\n datasource, form_data=form_data, force=force\n )\n return viz_obj\n\n @has_access\n @expose(\"/slice/<slice_id>/\")\n def slice(self, slice_id):\n form_data, slc = get_form_data(slice_id, use_slice_data=True)\n if not slc:\n abort(404)\n endpoint = \"/superset/explore/?form_data={}\".format(\n parse.quote(json.dumps({\"slice_id\": slice_id}))\n )\n param = utils.ReservedUrlParameters.STANDALONE.value\n if request.args.get(param) == \"true\":\n endpoint += f\"&{param}=true\"\n return redirect(endpoint)\n\n def get_query_string_response(self, viz_obj):\n query = None\n try:\n query_obj = viz_obj.query_obj()\n if query_obj:\n query = viz_obj.datasource.get_query_str(query_obj)\n except Exception as e:\n logger.exception(e)\n return json_error_response(e)\n\n if not query:\n query = \"No query.\"\n\n return self.json_response(\n {\"query\": query, \"language\": viz_obj.datasource.query_language}\n )\n\n def get_raw_results(self, viz_obj):\n return self.json_response(\n {\"data\": viz_obj.get_df_payload()[\"df\"].to_dict(\"records\")}\n )\n\n def 
get_samples(self, viz_obj):\n return self.json_response({\"data\": viz_obj.get_samples()})\n\n def generate_json(\n self, viz_obj, csv=False, query=False, results=False, samples=False\n ):\n if csv:\n return CsvResponse(\n viz_obj.get_csv(),\n status=200,\n headers=generate_download_headers(\"csv\"),\n mimetype=\"application/csv\",\n )\n\n if query:\n return self.get_query_string_response(viz_obj)\n\n if results:\n return self.get_raw_results(viz_obj)\n\n if samples:\n return self.get_samples(viz_obj)\n\n payload = viz_obj.get_payload()\n return data_payload_response(*viz_obj.payload_json_and_has_error(payload))\n\n @event_logger.log_this\n @api\n @has_access_api\n @expose(\"/slice_json/<slice_id>\")\n @etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_slice_perms)\n def slice_json(self, slice_id):\n form_data, slc = get_form_data(slice_id, use_slice_data=True)\n datasource_type = slc.datasource.type\n datasource_id = slc.datasource.id\n viz_obj = get_viz(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n force=False,\n )\n return self.generate_json(viz_obj)\n\n @event_logger.log_this\n @api\n @has_access_api\n @expose(\"/annotation_json/<layer_id>\")\n def annotation_json(self, layer_id):\n form_data = get_form_data()[0]\n form_data[\"layer_id\"] = layer_id\n form_data[\"filters\"] = [{\"col\": \"layer_id\", \"op\": \"==\", \"val\": layer_id}]\n datasource = AnnotationDatasource()\n viz_obj = viz.viz_types[\"table\"](datasource, form_data=form_data, force=False)\n payload = viz_obj.get_payload()\n return data_payload_response(*viz_obj.payload_json_and_has_error(payload))\n\n EXPLORE_JSON_METHODS = [\"POST\"]\n if not is_feature_enabled(\"ENABLE_EXPLORE_JSON_CSRF_PROTECTION\"):\n EXPLORE_JSON_METHODS.append(\"GET\")\n\n @event_logger.log_this\n @api\n @has_access_api\n @handle_api_exception\n @expose(\n \"/explore_json/<datasource_type>/<datasource_id>/\", methods=EXPLORE_JSON_METHODS\n )\n @expose(\"/explore_json/\", methods=EXPLORE_JSON_METHODS)\n @etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_datasource_perms)\n def explore_json(self, datasource_type=None, datasource_id=None):\n \"\"\"Serves all request that GET or POST form_data\n\n This endpoint evolved to be the entry point of many different\n requests that GETs or POSTs a form_data.\n\n `self.generate_json` receives this input and returns different\n payloads based on the request args in the first block\n\n TODO: break into one endpoint for each return shape\"\"\"\n csv = request.args.get(\"csv\") == \"true\"\n query = request.args.get(\"query\") == \"true\"\n results = request.args.get(\"results\") == \"true\"\n samples = request.args.get(\"samples\") == \"true\"\n force = request.args.get(\"force\") == \"true\"\n form_data = get_form_data()[0]\n\n try:\n datasource_id, datasource_type = get_datasource_info(\n datasource_id, datasource_type, form_data\n )\n except SupersetException as e:\n return json_error_response(utils.error_msg_from_exception(e))\n\n viz_obj = get_viz(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n force=force,\n )\n\n return self.generate_json(\n viz_obj, csv=csv, query=query, results=results, samples=samples\n )\n\n @event_logger.log_this\n @has_access\n @expose(\"/import_dashboards\", methods=[\"GET\", \"POST\"])\n def import_dashboards(self):\n \"\"\"Overrides the dashboards using json instances from the file.\"\"\"\n f = request.files.get(\"file\")\n if request.method == \"POST\" and f:\n try:\n 
dashboard_import_export.import_dashboards(db.session, f.stream)\n except DatabaseNotFound as e:\n flash(\n _(\n \"Cannot import dashboard: %(db_error)s.\\n\"\n \"Make sure to create the database before \"\n \"importing the dashboard.\",\n db_error=e,\n ),\n \"danger\",\n )\n except Exception as e:\n logger.exception(e)\n flash(\n _(\n \"An unknown error occurred. \"\n \"Please contact your Superset administrator\"\n ),\n \"danger\",\n )\n return redirect(\"/dashboard/list/\")\n return self.render_template(\"superset/import_dashboards.html\")\n\n @event_logger.log_this\n @has_access\n @expose(\"/explore/<datasource_type>/<datasource_id>/\", methods=[\"GET\", \"POST\"])\n @expose(\"/explore/\", methods=[\"GET\", \"POST\"])\n def explore(self, datasource_type=None, datasource_id=None):\n user_id = g.user.get_id() if g.user else None\n form_data, slc = get_form_data(use_slice_data=True)\n\n # Flash the SIP-15 message if the slice is owned by the current user and has not\n # been updated, i.e., is not using the [start, end) interval.\n if (\n config[\"SIP_15_ENABLED\"]\n and slc\n and g.user in slc.owners\n and (\n not form_data.get(\"time_range_endpoints\")\n or form_data[\"time_range_endpoints\"]\n != (\n utils.TimeRangeEndpoint.INCLUSIVE,\n utils.TimeRangeEndpoint.EXCLUSIVE,\n )\n )\n ):\n url = Href(\"/superset/explore/\")(\n {\n \"form_data\": json.dumps(\n {\n \"slice_id\": slc.id,\n \"time_range_endpoints\": (\n utils.TimeRangeEndpoint.INCLUSIVE.value,\n utils.TimeRangeEndpoint.EXCLUSIVE.value,\n ),\n }\n )\n }\n )\n\n flash(Markup(config[\"SIP_15_TOAST_MESSAGE\"].format(url=url)))\n\n error_redirect = \"/chart/list/\"\n try:\n datasource_id, datasource_type = get_datasource_info(\n datasource_id, datasource_type, form_data\n )\n except SupersetException:\n return redirect(error_redirect)\n\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session\n )\n if not datasource:\n flash(DATASOURCE_MISSING_ERR, \"danger\")\n return redirect(error_redirect)\n\n if config[\"ENABLE_ACCESS_REQUEST\"] and (\n not security_manager.datasource_access(datasource)\n ):\n flash(\n __(security_manager.get_datasource_access_error_msg(datasource)),\n \"danger\",\n )\n return redirect(\n \"superset/request_access/?\"\n f\"datasource_type={datasource_type}&\"\n f\"datasource_id={datasource_id}&\"\n )\n\n viz_type = form_data.get(\"viz_type\")\n if not viz_type and datasource.default_endpoint:\n return redirect(datasource.default_endpoint)\n\n # slc perms\n slice_add_perm = security_manager.can_access(\"can_add\", \"SliceModelView\")\n slice_overwrite_perm = is_owner(slc, g.user)\n slice_download_perm = security_manager.can_access(\n \"can_download\", \"SliceModelView\"\n )\n\n form_data[\"datasource\"] = str(datasource_id) + \"__\" + datasource_type\n\n # On explore, merge legacy and extra filters into the form data\n utils.convert_legacy_filters_into_adhoc(form_data)\n utils.merge_extra_filters(form_data)\n\n # merge request url params\n if request.method == \"GET\":\n utils.merge_request_params(form_data, request.args)\n\n # handle save or overwrite\n action = request.args.get(\"action\")\n\n if action == \"overwrite\" and not slice_overwrite_perm:\n return json_error_response(\n _(\"You don't have the rights to \") + _(\"alter this \") + _(\"chart\"),\n status=400,\n )\n\n if action == \"saveas\" and not slice_add_perm:\n return json_error_response(\n _(\"You don't have the rights to \") + _(\"create a \") + _(\"chart\"),\n status=400,\n )\n\n if action in (\"saveas\", 
\"overwrite\"):\n return self.save_or_overwrite_slice(\n request.args,\n slc,\n slice_add_perm,\n slice_overwrite_perm,\n slice_download_perm,\n datasource_id,\n datasource_type,\n datasource.name,\n )\n\n standalone = (\n request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == \"true\"\n )\n bootstrap_data = {\n \"can_add\": slice_add_perm,\n \"can_download\": slice_download_perm,\n \"can_overwrite\": slice_overwrite_perm,\n \"datasource\": datasource.data,\n \"form_data\": form_data,\n \"datasource_id\": datasource_id,\n \"datasource_type\": datasource_type,\n \"slice\": slc.data if slc else None,\n \"standalone\": standalone,\n \"user_id\": user_id,\n \"forced_height\": request.args.get(\"height\"),\n \"common\": common_bootstrap_payload(),\n }\n table_name = (\n datasource.table_name\n if datasource_type == \"table\"\n else datasource.datasource_name\n )\n if slc:\n title = slc.slice_name\n else:\n title = _(\"Explore - %(table)s\", table=table_name)\n return self.render_template(\n \"superset/basic.html\",\n bootstrap_data=json.dumps(\n bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser\n ),\n entry=\"explore\",\n title=title,\n standalone_mode=standalone,\n )\n\n @api\n @handle_api_exception\n @has_access_api\n @expose(\"/filter/<datasource_type>/<datasource_id>/<column>/\")\n def filter(self, datasource_type, datasource_id, column):\n \"\"\"\n Endpoint to retrieve values for specified column.\n\n :param datasource_type: Type of datasource e.g. table\n :param datasource_id: Datasource id\n :param column: Column name to retrieve values for\n :return:\n \"\"\"\n # TODO: Cache endpoint by user, datasource and column\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session\n )\n if not datasource:\n return json_error_response(DATASOURCE_MISSING_ERR)\n security_manager.assert_datasource_permission(datasource)\n payload = json.dumps(\n datasource.values_for_column(column, config[\"FILTER_SELECT_ROW_LIMIT\"]),\n default=utils.json_int_dttm_ser,\n )\n return json_success(payload)\n\n def save_or_overwrite_slice(\n self,\n args,\n slc,\n slice_add_perm,\n slice_overwrite_perm,\n slice_download_perm,\n datasource_id,\n datasource_type,\n datasource_name,\n ):\n \"\"\"Save or overwrite a slice\"\"\"\n slice_name = args.get(\"slice_name\")\n action = args.get(\"action\")\n form_data = get_form_data()[0]\n\n if action in (\"saveas\"):\n if \"slice_id\" in form_data:\n form_data.pop(\"slice_id\") # don't save old slice_id\n slc = Slice(owners=[g.user] if g.user else [])\n\n slc.params = json.dumps(form_data, indent=2, sort_keys=True)\n slc.datasource_name = datasource_name\n slc.viz_type = form_data[\"viz_type\"]\n slc.datasource_type = datasource_type\n slc.datasource_id = datasource_id\n slc.slice_name = slice_name\n\n if action in (\"saveas\") and slice_add_perm:\n self.save_slice(slc)\n elif action == \"overwrite\" and slice_overwrite_perm:\n self.overwrite_slice(slc)\n\n # Adding slice to a dashboard if requested\n dash = None\n if request.args.get(\"add_to_dash\") == \"existing\":\n dash = (\n db.session.query(Dashboard)\n .filter_by(id=int(request.args.get(\"save_to_dashboard_id\")))\n .one()\n )\n # check edit dashboard permissions\n dash_overwrite_perm = check_ownership(dash, raise_if_false=False)\n if not dash_overwrite_perm:\n return json_error_response(\n _(\"You don't have the rights to \")\n + _(\"alter this \")\n + _(\"dashboard\"),\n status=400,\n )\n\n flash(\n _(\"Chart [{}] was added to dashboard [{}]\").format(\n 
slc.slice_name, dash.dashboard_title\n ),\n \"info\",\n )\n elif request.args.get(\"add_to_dash\") == \"new\":\n # check create dashboard permissions\n dash_add_perm = security_manager.can_access(\"can_add\", \"DashboardModelView\")\n if not dash_add_perm:\n return json_error_response(\n _(\"You don't have the rights to \")\n + _(\"create a \")\n + _(\"dashboard\"),\n status=400,\n )\n\n dash = Dashboard(\n dashboard_title=request.args.get(\"new_dashboard_name\"),\n owners=[g.user] if g.user else [],\n )\n flash(\n _(\n \"Dashboard [{}] just got created and chart [{}] was added \" \"to it\"\n ).format(dash.dashboard_title, slc.slice_name),\n \"info\",\n )\n\n if dash and slc not in dash.slices:\n dash.slices.append(slc)\n db.session.commit()\n\n response = {\n \"can_add\": slice_add_perm,\n \"can_download\": slice_download_perm,\n \"can_overwrite\": is_owner(slc, g.user),\n \"form_data\": slc.form_data,\n \"slice\": slc.data,\n \"dashboard_id\": dash.id if dash else None,\n }\n\n if request.args.get(\"goto_dash\") == \"true\":\n response.update({\"dashboard\": dash.url})\n\n return json_success(json.dumps(response))\n\n def save_slice(self, slc):\n session = db.session()\n msg = _(\"Chart [{}] has been saved\").format(slc.slice_name)\n session.add(slc)\n session.commit()\n flash(msg, \"info\")\n\n def overwrite_slice(self, slc):\n session = db.session()\n session.merge(slc)\n session.commit()\n msg = _(\"Chart [{}] has been overwritten\").format(slc.slice_name)\n flash(msg, \"info\")\n\n @api\n @has_access_api\n @expose(\"/schemas/<db_id>/\")\n @expose(\"/schemas/<db_id>/<force_refresh>/\")\n def schemas(self, db_id, force_refresh=\"false\"):\n db_id = int(db_id)\n force_refresh = force_refresh.lower() == \"true\"\n database = db.session.query(models.Database).get(db_id)\n if database:\n schemas = database.get_all_schema_names(\n cache=database.schema_cache_enabled,\n cache_timeout=database.schema_cache_timeout,\n force=force_refresh,\n )\n schemas = security_manager.schemas_accessible_by_user(database, schemas)\n else:\n schemas = []\n\n return Response(json.dumps({\"schemas\": schemas}), mimetype=\"application/json\")\n\n @api\n @has_access_api\n @expose(\"/tables/<int:db_id>/<schema>/<substr>/\")\n @expose(\"/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/\")\n def tables(\n self, db_id: int, schema: str, substr: str, force_refresh: str = \"false\"\n ):\n \"\"\"Endpoint to fetch the list of tables for given database\"\"\"\n # Guarantees database filtering by security access\n query = db.session.query(models.Database)\n query = DatabaseFilter(\"id\", SQLAInterface(models.Database, db.session)).apply(\n query, None\n )\n database = query.filter_by(id=db_id).one_or_none()\n if not database:\n return json_error_response(\"Not found\", 404)\n\n force_refresh_parsed = force_refresh.lower() == \"true\"\n schema_parsed = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n substr_parsed = utils.parse_js_uri_path_item(substr, eval_undefined=True)\n\n if schema_parsed:\n tables = (\n database.get_all_table_names_in_schema(\n schema=schema_parsed,\n force=force_refresh_parsed,\n cache=database.table_cache_enabled,\n cache_timeout=database.table_cache_timeout,\n )\n or []\n )\n views = (\n database.get_all_view_names_in_schema(\n schema=schema_parsed,\n force=force_refresh_parsed,\n cache=database.table_cache_enabled,\n cache_timeout=database.table_cache_timeout,\n )\n or []\n )\n else:\n tables = database.get_all_table_names_in_database(\n cache=True, force=False, cache_timeout=24 * 
60 * 60\n )\n views = database.get_all_view_names_in_database(\n cache=True, force=False, cache_timeout=24 * 60 * 60\n )\n tables = security_manager.get_datasources_accessible_by_user(\n database, tables, schema_parsed\n )\n views = security_manager.get_datasources_accessible_by_user(\n database, views, schema_parsed\n )\n\n def get_datasource_label(ds_name: utils.DatasourceName) -> str:\n return (\n ds_name.table if schema_parsed else f\"{ds_name.schema}.{ds_name.table}\"\n )\n\n if substr_parsed:\n tables = [tn for tn in tables if substr_parsed in get_datasource_label(tn)]\n views = [vn for vn in views if substr_parsed in get_datasource_label(vn)]\n\n if not schema_parsed and database.default_schemas:\n user_schema = g.user.email.split(\"@\")[0]\n valid_schemas = set(database.default_schemas + [user_schema])\n\n tables = [tn for tn in tables if tn.schema in valid_schemas]\n views = [vn for vn in views if vn.schema in valid_schemas]\n\n max_items = config[\"MAX_TABLE_NAMES\"] or len(tables)\n total_items = len(tables) + len(views)\n max_tables = len(tables)\n max_views = len(views)\n if total_items and substr_parsed:\n max_tables = max_items * len(tables) // total_items\n max_views = max_items * len(views) // total_items\n\n table_options = [\n {\n \"value\": tn.table,\n \"schema\": tn.schema,\n \"label\": get_datasource_label(tn),\n \"title\": get_datasource_label(tn),\n \"type\": \"table\",\n }\n for tn in tables[:max_tables]\n ]\n table_options.extend(\n [\n {\n \"value\": vn.table,\n \"schema\": vn.schema,\n \"label\": get_datasource_label(vn),\n \"title\": get_datasource_label(vn),\n \"type\": \"view\",\n }\n for vn in views[:max_views]\n ]\n )\n table_options.sort(key=lambda value: value[\"label\"])\n payload = {\"tableLength\": len(tables) + len(views), \"options\": table_options}\n return json_success(json.dumps(payload))\n\n @api\n @has_access_api\n @expose(\"/copy_dash/<dashboard_id>/\", methods=[\"GET\", \"POST\"])\n def copy_dash(self, dashboard_id):\n \"\"\"Copy dashboard\"\"\"\n session = db.session()\n data = json.loads(request.form.get(\"data\"))\n dash = models.Dashboard()\n original_dash = session.query(Dashboard).get(dashboard_id)\n\n dash.owners = [g.user] if g.user else []\n dash.dashboard_title = data[\"dashboard_title\"]\n\n if data[\"duplicate_slices\"]:\n # Duplicating slices as well, mapping old ids to new ones\n old_to_new_sliceids = {}\n for slc in original_dash.slices:\n new_slice = slc.clone()\n new_slice.owners = [g.user] if g.user else []\n session.add(new_slice)\n session.flush()\n new_slice.dashboards.append(dash)\n old_to_new_sliceids[\"{}\".format(slc.id)] = \"{}\".format(new_slice.id)\n\n # update chartId of layout entities\n # in v2_dash positions json data, chartId should be integer,\n # while in older version slice_id is string type\n for value in data[\"positions\"].values():\n if (\n isinstance(value, dict)\n and value.get(\"meta\")\n and value.get(\"meta\").get(\"chartId\")\n ):\n old_id = \"{}\".format(value.get(\"meta\").get(\"chartId\"))\n new_id = int(old_to_new_sliceids[old_id])\n value[\"meta\"][\"chartId\"] = new_id\n else:\n dash.slices = original_dash.slices\n dash.params = original_dash.params\n\n self._set_dash_metadata(dash, data)\n session.add(dash)\n session.commit()\n dash_json = json.dumps(dash.data)\n session.close()\n return json_success(dash_json)\n\n @api\n @has_access_api\n @expose(\"/save_dash/<dashboard_id>/\", methods=[\"GET\", \"POST\"])\n def save_dash(self, dashboard_id):\n \"\"\"Save a dashboard's metadata\"\"\"\n 
session = db.session()\n dash = session.query(Dashboard).get(dashboard_id)\n check_ownership(dash, raise_if_false=True)\n data = json.loads(request.form.get(\"data\"))\n self._set_dash_metadata(dash, data)\n session.merge(dash)\n session.commit()\n session.close()\n return json_success(json.dumps({\"status\": \"SUCCESS\"}))\n\n @staticmethod\n def _set_dash_metadata(dashboard, data):\n positions = data[\"positions\"]\n # find slices in the position data\n slice_ids = []\n slice_id_to_name = {}\n for value in positions.values():\n if isinstance(value, dict):\n try:\n slice_id = value[\"meta\"][\"chartId\"]\n slice_ids.append(slice_id)\n slice_id_to_name[slice_id] = value[\"meta\"][\"sliceName\"]\n except KeyError:\n pass\n\n session = db.session()\n current_slices = session.query(Slice).filter(Slice.id.in_(slice_ids)).all()\n\n dashboard.slices = current_slices\n\n # update slice names. this assumes user has permissions to update the slice\n # we allow user set slice name be empty string\n for slc in dashboard.slices:\n try:\n new_name = slice_id_to_name[slc.id]\n if slc.slice_name != new_name:\n slc.slice_name = new_name\n session.merge(slc)\n session.flush()\n except KeyError:\n pass\n\n # remove leading and trailing white spaces in the dumped json\n dashboard.position_json = json.dumps(\n positions, indent=None, separators=(\",\", \":\"), sort_keys=True\n )\n md = dashboard.params_dict\n dashboard.css = data.get(\"css\")\n dashboard.dashboard_title = data[\"dashboard_title\"]\n\n if \"timed_refresh_immune_slices\" not in md:\n md[\"timed_refresh_immune_slices\"] = []\n if \"filter_scopes\" in data:\n md[\"filter_scopes\"] = json.loads(data[\"filter_scopes\"] or \"{}\")\n md[\"expanded_slices\"] = data[\"expanded_slices\"]\n md[\"refresh_frequency\"] = data.get(\"refresh_frequency\", 0)\n default_filters_data = json.loads(data.get(\"default_filters\", \"{}\"))\n applicable_filters = {\n key: v for key, v in default_filters_data.items() if int(key) in slice_ids\n }\n md[\"default_filters\"] = json.dumps(applicable_filters)\n if data.get(\"color_namespace\"):\n md[\"color_namespace\"] = data.get(\"color_namespace\")\n if data.get(\"color_scheme\"):\n md[\"color_scheme\"] = data.get(\"color_scheme\")\n if data.get(\"label_colors\"):\n md[\"label_colors\"] = data.get(\"label_colors\")\n dashboard.json_metadata = json.dumps(md)\n\n @api\n @has_access_api\n @expose(\"/add_slices/<dashboard_id>/\", methods=[\"POST\"])\n def add_slices(self, dashboard_id):\n \"\"\"Add and save slices to a dashboard\"\"\"\n data = json.loads(request.form.get(\"data\"))\n session = db.session()\n dash = session.query(Dashboard).get(dashboard_id)\n check_ownership(dash, raise_if_false=True)\n new_slices = session.query(Slice).filter(Slice.id.in_(data[\"slice_ids\"]))\n dash.slices += new_slices\n session.merge(dash)\n session.commit()\n session.close()\n return \"SLICES ADDED\"\n\n @api\n @has_access_api\n @expose(\"/testconn\", methods=[\"POST\", \"GET\"])\n def testconn(self):\n \"\"\"Tests a sqla connection\"\"\"\n try:\n db_name = request.json.get(\"name\")\n uri = request.json.get(\"uri\")\n\n # if the database already exists in the database, only its safe (password-masked) URI\n # would be shown in the UI and would be passed in the form data.\n # so if the database already exists and the form was submitted with the safe URI,\n # we assume we should retrieve the decrypted URI to test the connection.\n if db_name:\n existing_database = (\n db.session.query(models.Database)\n .filter_by(database_name=db_name)\n 
.one_or_none()\n )\n if existing_database and uri == existing_database.safe_sqlalchemy_uri():\n uri = existing_database.sqlalchemy_uri_decrypted\n\n # this is the database instance that will be tested\n database = models.Database(\n # extras is sent as json, but required to be a string in the Database model\n extra=json.dumps(request.json.get(\"extras\", {})),\n impersonate_user=request.json.get(\"impersonate_user\"),\n encrypted_extra=json.dumps(request.json.get(\"encrypted_extra\", {})),\n )\n database.set_sqlalchemy_uri(uri)\n\n username = g.user.username if g.user is not None else None\n engine = database.get_sqla_engine(user_name=username)\n\n with closing(engine.connect()) as conn:\n conn.scalar(select([1]))\n return json_success('\"OK\"')\n except Exception as e:\n logger.exception(e)\n return json_error_response(\n \"Connection failed!\\n\\n\" f\"The error message returned was:\\n{e}\", 400\n )\n\n @api\n @has_access_api\n @expose(\"/recent_activity/<user_id>/\", methods=[\"GET\"])\n def recent_activity(self, user_id):\n \"\"\"Recent activity (actions) for a given user\"\"\"\n M = models\n\n if request.args.get(\"limit\"):\n limit = int(request.args.get(\"limit\"))\n else:\n limit = 1000\n\n qry = (\n db.session.query(M.Log, M.Dashboard, Slice)\n .outerjoin(M.Dashboard, M.Dashboard.id == M.Log.dashboard_id)\n .outerjoin(Slice, Slice.id == M.Log.slice_id)\n .filter(\n and_(\n ~M.Log.action.in_((\"queries\", \"shortner\", \"sql_json\")),\n M.Log.user_id == user_id,\n )\n )\n .order_by(M.Log.dttm.desc())\n .limit(limit)\n )\n payload = []\n for log in qry.all():\n item_url = None\n item_title = None\n if log.Dashboard:\n item_url = log.Dashboard.url\n item_title = log.Dashboard.dashboard_title\n elif log.Slice:\n item_url = log.Slice.slice_url\n item_title = log.Slice.slice_name\n\n payload.append(\n {\n \"action\": log.Log.action,\n \"item_url\": item_url,\n \"item_title\": item_title,\n \"time\": log.Log.dttm,\n }\n )\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose(\"/csrf_token/\", methods=[\"GET\"])\n def csrf_token(self):\n return Response(\n self.render_template(\"superset/csrf_token.json\"), mimetype=\"text/json\"\n )\n\n @api\n @has_access_api\n @expose(\"/available_domains/\", methods=[\"GET\"])\n def available_domains(self):\n \"\"\"\n Returns the list of available Superset Webserver domains (if any)\n defined in config. 
This enables charts embedded in other apps to\n leverage domain sharding if appropriately configured.\n \"\"\"\n return Response(\n json.dumps(conf.get(\"SUPERSET_WEBSERVER_DOMAINS\")), mimetype=\"text/json\"\n )\n\n @api\n @has_access_api\n @expose(\"/fave_dashboards_by_username/<username>/\", methods=[\"GET\"])\n def fave_dashboards_by_username(self, username):\n \"\"\"This lets us use a user's username to pull favourite dashboards\"\"\"\n user = security_manager.find_user(username=username)\n return self.fave_dashboards(user.get_id())\n\n @api\n @has_access_api\n @expose(\"/fave_dashboards/<user_id>/\", methods=[\"GET\"])\n def fave_dashboards(self, user_id):\n qry = (\n db.session.query(Dashboard, models.FavStar.dttm)\n .join(\n models.FavStar,\n and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == \"Dashboard\",\n Dashboard.id == models.FavStar.obj_id,\n ),\n )\n .order_by(models.FavStar.dttm.desc())\n )\n payload = []\n for o in qry.all():\n d = {\n \"id\": o.Dashboard.id,\n \"dashboard\": o.Dashboard.dashboard_link(),\n \"title\": o.Dashboard.dashboard_title,\n \"url\": o.Dashboard.url,\n \"dttm\": o.dttm,\n }\n if o.Dashboard.created_by:\n user = o.Dashboard.created_by\n d[\"creator\"] = str(user)\n d[\"creator_url\"] = \"/superset/profile/{}/\".format(user.username)\n payload.append(d)\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose(\"/created_dashboards/<user_id>/\", methods=[\"GET\"])\n def created_dashboards(self, user_id):\n Dash = Dashboard\n qry = (\n db.session.query(Dash)\n .filter(or_(Dash.created_by_fk == user_id, Dash.changed_by_fk == user_id))\n .order_by(Dash.changed_on.desc())\n )\n payload = [\n {\n \"id\": o.id,\n \"dashboard\": o.dashboard_link(),\n \"title\": o.dashboard_title,\n \"url\": o.url,\n \"dttm\": o.changed_on,\n }\n for o in qry.all()\n ]\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose(\"/user_slices\", methods=[\"GET\"])\n @expose(\"/user_slices/<user_id>/\", methods=[\"GET\"])\n def user_slices(self, user_id=None):\n \"\"\"List of slices a user created, or faved\"\"\"\n if not user_id:\n user_id = g.user.id\n FavStar = models.FavStar\n qry = (\n db.session.query(Slice, FavStar.dttm)\n .join(\n models.FavStar,\n and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == \"slice\",\n Slice.id == models.FavStar.obj_id,\n ),\n isouter=True,\n )\n .filter(\n or_(\n Slice.created_by_fk == user_id,\n Slice.changed_by_fk == user_id,\n FavStar.user_id == user_id,\n )\n )\n .order_by(Slice.slice_name.asc())\n )\n payload = [\n {\n \"id\": o.Slice.id,\n \"title\": o.Slice.slice_name,\n \"url\": o.Slice.slice_url,\n \"data\": o.Slice.form_data,\n \"dttm\": o.dttm if o.dttm else o.Slice.changed_on,\n \"viz_type\": o.Slice.viz_type,\n }\n for o in qry.all()\n ]\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose(\"/created_slices\", methods=[\"GET\"])\n @expose(\"/created_slices/<user_id>/\", methods=[\"GET\"])\n def created_slices(self, user_id=None):\n \"\"\"List of slices created by this user\"\"\"\n if not user_id:\n user_id = g.user.id\n qry = (\n db.session.query(Slice)\n .filter(or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id))\n .order_by(Slice.changed_on.desc())\n )\n payload = [\n {\n \"id\": o.id,\n \"title\": o.slice_name,\n \"url\": o.slice_url,\n \"dttm\": o.changed_on,\n \"viz_type\": o.viz_type,\n 
}\n for o in qry.all()\n ]\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose(\"/fave_slices\", methods=[\"GET\"])\n @expose(\"/fave_slices/<user_id>/\", methods=[\"GET\"])\n def fave_slices(self, user_id=None):\n \"\"\"Favorite slices for a user\"\"\"\n if not user_id:\n user_id = g.user.id\n qry = (\n db.session.query(Slice, models.FavStar.dttm)\n .join(\n models.FavStar,\n and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == \"slice\",\n Slice.id == models.FavStar.obj_id,\n ),\n )\n .order_by(models.FavStar.dttm.desc())\n )\n payload = []\n for o in qry.all():\n d = {\n \"id\": o.Slice.id,\n \"title\": o.Slice.slice_name,\n \"url\": o.Slice.slice_url,\n \"dttm\": o.dttm,\n \"viz_type\": o.Slice.viz_type,\n }\n if o.Slice.created_by:\n user = o.Slice.created_by\n d[\"creator\"] = str(user)\n d[\"creator_url\"] = \"/superset/profile/{}/\".format(user.username)\n payload.append(d)\n return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose(\"/warm_up_cache/\", methods=[\"GET\"])\n def warm_up_cache(self):\n \"\"\"Warms up the cache for the slice or table.\n\n Note for slices a force refresh occurs.\n \"\"\"\n slices = None\n session = db.session()\n slice_id = request.args.get(\"slice_id\")\n table_name = request.args.get(\"table_name\")\n db_name = request.args.get(\"db_name\")\n\n if not slice_id and not (table_name and db_name):\n return json_error_response(\n __(\n \"Malformed request. slice_id or table_name and db_name \"\n \"arguments are expected\"\n ),\n status=400,\n )\n if slice_id:\n slices = session.query(Slice).filter_by(id=slice_id).all()\n if not slices:\n return json_error_response(\n __(\"Chart %(id)s not found\", id=slice_id), status=404\n )\n elif table_name and db_name:\n SqlaTable = ConnectorRegistry.sources[\"table\"]\n table = (\n session.query(SqlaTable)\n .join(models.Database)\n .filter(\n models.Database.database_name == db_name\n or SqlaTable.table_name == table_name\n )\n ).one_or_none()\n if not table:\n return json_error_response(\n __(\n \"Table %(t)s wasn't found in the database %(d)s\",\n t=table_name,\n s=db_name,\n ),\n status=404,\n )\n slices = (\n session.query(Slice)\n .filter_by(datasource_id=table.id, datasource_type=table.type)\n .all()\n )\n\n for slc in slices:\n try:\n form_data = get_form_data(slc.id, use_slice_data=True)[0]\n obj = get_viz(\n datasource_type=slc.datasource.type,\n datasource_id=slc.datasource.id,\n form_data=form_data,\n force=True,\n )\n obj.get_json()\n except Exception as e:\n logger.exception(\"Failed to warm up cache\")\n return json_error_response(utils.error_msg_from_exception(e))\n return json_success(\n json.dumps(\n [{\"slice_id\": slc.id, \"slice_name\": slc.slice_name} for slc in slices]\n )\n )\n\n @has_access_api\n @expose(\"/favstar/<class_name>/<obj_id>/<action>/\")\n def favstar(self, class_name, obj_id, action):\n \"\"\"Toggle favorite stars on Slices and Dashboard\"\"\"\n session = db.session()\n FavStar = models.FavStar\n count = 0\n favs = (\n session.query(FavStar)\n .filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id())\n .all()\n )\n if action == \"select\":\n if not favs:\n session.add(\n FavStar(\n class_name=class_name,\n obj_id=obj_id,\n user_id=g.user.get_id(),\n dttm=datetime.now(),\n )\n )\n count = 1\n elif action == \"unselect\":\n for fav in favs:\n session.delete(fav)\n else:\n count = len(favs)\n session.commit()\n return 
json_success(json.dumps({\"count\": count}))\n\n @api\n @has_access_api\n @expose(\"/dashboard/<dashboard_id>/published/\", methods=(\"GET\", \"POST\"))\n def publish(self, dashboard_id):\n \"\"\"Gets and toggles published status on dashboards\"\"\"\n logger.warning(\n \"This API endpoint is deprecated and will be removed in version 1.0.0\"\n )\n session = db.session()\n Role = ab_models.Role\n dash = (\n session.query(Dashboard).filter(Dashboard.id == dashboard_id).one_or_none()\n )\n admin_role = session.query(Role).filter(Role.name == \"Admin\").one_or_none()\n\n if request.method == \"GET\":\n if dash:\n return json_success(json.dumps({\"published\": dash.published}))\n else:\n return json_error_response(\n f\"ERROR: cannot find dashboard {dashboard_id}\", status=404\n )\n\n else:\n edit_perm = is_owner(dash, g.user) or admin_role in get_user_roles()\n if not edit_perm:\n return json_error_response(\n f'ERROR: \"{g.user.username}\" cannot alter dashboard \"{dash.dashboard_title}\"',\n status=403,\n )\n\n dash.published = str(request.form[\"published\"]).lower() == \"true\"\n session.commit()\n return json_success(json.dumps({\"published\": dash.published}))\n\n @has_access\n @expose(\"/dashboard/<dashboard_id>/\")\n def dashboard(self, dashboard_id):\n \"\"\"Server side rendering for a dashboard\"\"\"\n session = db.session()\n qry = session.query(Dashboard)\n if dashboard_id.isdigit():\n qry = qry.filter_by(id=int(dashboard_id))\n else:\n qry = qry.filter_by(slug=dashboard_id)\n\n dash = qry.one_or_none()\n if not dash:\n abort(404)\n datasources = set()\n for slc in dash.slices:\n datasource = slc.datasource\n if datasource:\n datasources.add(datasource)\n\n if config[\"ENABLE_ACCESS_REQUEST\"]:\n for datasource in datasources:\n if datasource and not security_manager.datasource_access(datasource):\n flash(\n __(\n security_manager.get_datasource_access_error_msg(datasource)\n ),\n \"danger\",\n )\n return redirect(\n \"superset/request_access/?\" f\"dashboard_id={dash.id}&\"\n )\n\n dash_edit_perm = check_ownership(\n dash, raise_if_false=False\n ) and security_manager.can_access(\"can_save_dash\", \"Superset\")\n dash_save_perm = security_manager.can_access(\"can_save_dash\", \"Superset\")\n superset_can_explore = security_manager.can_access(\"can_explore\", \"Superset\")\n superset_can_csv = security_manager.can_access(\"can_csv\", \"Superset\")\n slice_can_edit = security_manager.can_access(\"can_edit\", \"SliceModelView\")\n\n standalone_mode = (\n request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == \"true\"\n )\n edit_mode = (\n request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == \"true\"\n )\n\n # Hack to log the dashboard_id properly, even when getting a slug\n @event_logger.log_this\n def dashboard(**kwargs):\n pass\n\n dashboard(\n dashboard_id=dash.id,\n dashboard_version=\"v2\",\n dash_edit_perm=dash_edit_perm,\n edit_mode=edit_mode,\n )\n\n dashboard_data = dash.data\n dashboard_data.update(\n {\n \"standalone_mode\": standalone_mode,\n \"dash_save_perm\": dash_save_perm,\n \"dash_edit_perm\": dash_edit_perm,\n \"superset_can_explore\": superset_can_explore,\n \"superset_can_csv\": superset_can_csv,\n \"slice_can_edit\": slice_can_edit,\n }\n )\n url_params = {\n key: value\n for key, value in request.args.items()\n if key not in [param.value for param in utils.ReservedUrlParameters]\n }\n\n bootstrap_data = {\n \"user_id\": g.user.get_id(),\n \"dashboard_data\": dashboard_data,\n \"datasources\": {ds.uid: ds.data for ds in datasources},\n 
\"common\": common_bootstrap_payload(),\n \"editMode\": edit_mode,\n \"urlParams\": url_params,\n }\n\n if request.args.get(\"json\") == \"true\":\n return json_success(\n json.dumps(bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser)\n )\n\n return self.render_template(\n \"superset/dashboard.html\",\n entry=\"dashboard\",\n standalone_mode=standalone_mode,\n title=dash.dashboard_title,\n bootstrap_data=json.dumps(\n bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser\n ),\n )\n\n @api\n @event_logger.log_this\n @expose(\"/log/\", methods=[\"POST\"])\n def log(self):\n return Response(status=200)\n\n @has_access\n @expose(\"/sync_druid/\", methods=[\"POST\"])\n @event_logger.log_this\n def sync_druid_source(self):\n \"\"\"Syncs the druid datasource in main db with the provided config.\n\n The endpoint takes 3 arguments:\n user - user name to perform the operation as\n cluster - name of the druid cluster\n config - configuration stored in json that contains:\n name: druid datasource name\n dimensions: list of the dimensions, they become druid columns\n with the type STRING\n metrics_spec: list of metrics (dictionary). Metric consists of\n 2 attributes: type and name. Type can be count,\n etc. `count` type is stored internally as longSum\n other fields will be ignored.\n\n Example: {\n 'name': 'test_click',\n 'metrics_spec': [{'type': 'count', 'name': 'count'}],\n 'dimensions': ['affiliate_id', 'campaign', 'first_seen']\n }\n \"\"\"\n payload = request.get_json(force=True)\n druid_config = payload[\"config\"]\n user_name = payload[\"user\"]\n cluster_name = payload[\"cluster\"]\n\n user = security_manager.find_user(username=user_name)\n DruidDatasource = ConnectorRegistry.sources[\"druid\"]\n DruidCluster = DruidDatasource.cluster_class\n if not user:\n err_msg = __(\n \"Can't find User '%(name)s', please ask your admin \" \"to create one.\",\n name=user_name,\n )\n logger.error(err_msg)\n return json_error_response(err_msg)\n cluster = (\n db.session.query(DruidCluster)\n .filter_by(cluster_name=cluster_name)\n .one_or_none()\n )\n if not cluster:\n err_msg = __(\n \"Can't find DruidCluster with cluster_name = \" \"'%(name)s'\",\n name=cluster_name,\n )\n logger.error(err_msg)\n return json_error_response(err_msg)\n try:\n DruidDatasource.sync_to_db_from_config(druid_config, user, cluster)\n except Exception as e:\n logger.exception(utils.error_msg_from_exception(e))\n return json_error_response(utils.error_msg_from_exception(e))\n return Response(status=201)\n\n @has_access\n @expose(\"/sqllab_viz/\", methods=[\"POST\"])\n @event_logger.log_this\n def sqllab_viz(self):\n SqlaTable = ConnectorRegistry.sources[\"table\"]\n data = json.loads(request.form.get(\"data\"))\n table_name = data.get(\"datasourceName\")\n database_id = data.get(\"dbId\")\n table = (\n db.session.query(SqlaTable)\n .filter_by(database_id=database_id, table_name=table_name)\n .one_or_none()\n )\n if not table:\n table = SqlaTable(table_name=table_name, owners=[g.user])\n table.database_id = database_id\n table.schema = data.get(\"schema\")\n table.template_params = data.get(\"templateParams\")\n table.is_sqllab_view = True\n q = ParsedQuery(data.get(\"sql\"))\n table.sql = q.stripped()\n db.session.add(table)\n cols = []\n for config in data.get(\"columns\"):\n column_name = config.get(\"name\")\n SqlaTable = ConnectorRegistry.sources[\"table\"]\n TableColumn = SqlaTable.column_class\n SqlMetric = SqlaTable.metric_class\n col = TableColumn(\n column_name=column_name,\n filterable=True,\n 
groupby=True,\n is_dttm=config.get(\"is_date\", False),\n type=config.get(\"type\", False),\n )\n cols.append(col)\n\n table.columns = cols\n table.metrics = [SqlMetric(metric_name=\"count\", expression=\"count(*)\")]\n db.session.commit()\n return json_success(json.dumps({\"table_id\": table.id}))\n\n @has_access\n @expose(\"/extra_table_metadata/<database_id>/<table_name>/<schema>/\")\n @event_logger.log_this\n def extra_table_metadata(self, database_id, table_name, schema):\n schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n table_name = utils.parse_js_uri_path_item(table_name)\n mydb = db.session.query(models.Database).filter_by(id=database_id).one()\n payload = mydb.db_engine_spec.extra_table_metadata(mydb, table_name, schema)\n return json_success(json.dumps(payload))\n\n @has_access\n @expose(\"/select_star/<database_id>/<table_name>\")\n @expose(\"/select_star/<database_id>/<table_name>/<schema>\")\n @event_logger.log_this\n def select_star(self, database_id, table_name, schema=None):\n logging.warning(\n f\"{self.__class__.__name__}.select_star \"\n \"This API endpoint is deprecated and will be removed in version 1.0.0\"\n )\n stats_logger.incr(f\"{self.__class__.__name__}.select_star.init\")\n database = db.session.query(models.Database).get(database_id)\n if not database:\n stats_logger.incr(\n f\"deprecated.{self.__class__.__name__}.select_star.database_not_found\"\n )\n return json_error_response(\"Not found\", 404)\n schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n table_name = utils.parse_js_uri_path_item(table_name)\n # Check that the user can access the datasource\n if not self.appbuilder.sm.can_access_datasource(database, table_name, schema):\n stats_logger.incr(\n f\"deprecated.{self.__class__.__name__}.select_star.permission_denied\"\n )\n logging.warning(\n f\"Permission denied for user {g.user} on table: {table_name} \"\n f\"schema: {schema}\"\n )\n return json_error_response(\"Not found\", 404)\n stats_logger.incr(f\"deprecated.{self.__class__.__name__}.select_star.success\")\n return json_success(\n database.select_star(\n table_name, schema, latest_partition=True, show_cols=True\n )\n )\n\n @has_access_api\n @expose(\"/estimate_query_cost/<database_id>/\", methods=[\"POST\"])\n @expose(\"/estimate_query_cost/<database_id>/<schema>/\", methods=[\"POST\"])\n @event_logger.log_this\n def estimate_query_cost(\n self, database_id: int, schema: Optional[str] = None\n ) -> Response:\n mydb = db.session.query(models.Database).get(database_id)\n\n sql = json.loads(request.form.get(\"sql\", '\"\"'))\n template_params = json.loads(request.form.get(\"templateParams\") or \"{}\")\n if template_params:\n template_processor = get_template_processor(mydb)\n sql = template_processor.process_template(sql, **template_params)\n\n timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT\n timeout_msg = f\"The estimation exceeded the {timeout} seconds timeout.\"\n try:\n with utils.timeout(seconds=timeout, error_message=timeout_msg):\n cost = mydb.db_engine_spec.estimate_query_cost(\n mydb, schema, sql, utils.sources.get(\"sql_lab\")\n )\n except SupersetTimeoutException as e:\n logger.exception(e)\n return json_error_response(timeout_msg)\n except Exception as e:\n return json_error_response(str(e))\n\n spec = mydb.db_engine_spec\n query_cost_formatters = get_feature_flags().get(\n \"QUERY_COST_FORMATTERS_BY_ENGINE\", {}\n )\n query_cost_formatter = query_cost_formatters.get(\n spec.engine, spec.query_cost_formatter\n )\n cost = query_cost_formatter(cost)\n\n 
return json_success(json.dumps(cost))\n\n @expose(\"/theme/\")\n def theme(self):\n return self.render_template(\"superset/theme.html\")\n\n @has_access_api\n @expose(\"/results/<key>/\")\n @event_logger.log_this\n def results(self, key):\n return self.results_exec(key)\n\n def results_exec(self, key: str):\n \"\"\"Serves a key off of the results backend\n\n It is possible to pass the `rows` query argument to limit the number\n of rows returned.\n \"\"\"\n if not results_backend:\n return json_error_response(\"Results backend isn't configured\")\n\n read_from_results_backend_start = now_as_float()\n blob = results_backend.get(key)\n stats_logger.timing(\n \"sqllab.query.results_backend_read\",\n now_as_float() - read_from_results_backend_start,\n )\n if not blob:\n return json_error_response(\n \"Data could not be retrieved. \" \"You may want to re-run the query.\",\n status=410,\n )\n\n query = db.session.query(Query).filter_by(results_key=key).one_or_none()\n if query is None:\n return json_error_response(\n \"Data could not be retrieved. You may want to re-run the query.\",\n status=404,\n )\n\n rejected_tables = security_manager.rejected_tables(\n query.sql, query.database, query.schema\n )\n if rejected_tables:\n return json_error_response(\n security_manager.get_table_access_error_msg(rejected_tables), status=403\n )\n\n payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)\n obj: dict = _deserialize_results_payload(\n payload, query, cast(bool, results_backend_use_msgpack)\n )\n\n if \"rows\" in request.args:\n try:\n rows = int(request.args[\"rows\"])\n except ValueError:\n return json_error_response(\"Invalid `rows` argument\", status=400)\n obj = apply_display_max_row_limit(obj, rows)\n\n return json_success(\n json.dumps(obj, default=utils.json_iso_dttm_ser, ignore_nan=True)\n )\n\n @has_access_api\n @expose(\"/stop_query/\", methods=[\"POST\"])\n @event_logger.log_this\n @backoff.on_exception(\n backoff.constant,\n Exception,\n interval=1,\n on_backoff=lambda details: db.session.rollback(),\n on_giveup=lambda details: db.session.rollback(),\n max_tries=5,\n )\n def stop_query(self):\n client_id = request.form.get(\"client_id\")\n\n query = db.session.query(Query).filter_by(client_id=client_id).one()\n if query.status in [\n QueryStatus.FAILED,\n QueryStatus.SUCCESS,\n QueryStatus.TIMED_OUT,\n ]:\n logger.error(\n f\"Query with client_id {client_id} could not be stopped: query already complete\"\n )\n return self.json_response(\"OK\")\n query.status = QueryStatus.STOPPED\n db.session.commit()\n\n return self.json_response(\"OK\")\n\n @has_access_api\n @expose(\"/validate_sql_json/\", methods=[\"POST\", \"GET\"])\n @event_logger.log_this\n def validate_sql_json(self):\n \"\"\"Validates that arbitrary sql is acceptable for the given database.\n Returns a list of error/warning annotations as json.\n \"\"\"\n sql = request.form.get(\"sql\")\n database_id = request.form.get(\"database_id\")\n schema = request.form.get(\"schema\") or None\n template_params = json.loads(request.form.get(\"templateParams\") or \"{}\")\n\n if len(template_params) > 0:\n # TODO: factor the Database object out of template rendering\n # or provide it as mydb so we can render template params\n # without having to also persist a Query ORM object.\n return json_error_response(\n \"SQL validation does not support template parameters\", status=400\n )\n\n session = db.session()\n mydb = session.query(models.Database).filter_by(id=database_id).one_or_none()\n if not mydb:\n return 
json_error_response(\n \"Database with id {} is missing.\".format(database_id), status=400\n )\n\n spec = mydb.db_engine_spec\n validators_by_engine = get_feature_flags().get(\"SQL_VALIDATORS_BY_ENGINE\")\n if not validators_by_engine or spec.engine not in validators_by_engine:\n return json_error_response(\n \"no SQL validator is configured for {}\".format(spec.engine), status=400\n )\n validator_name = validators_by_engine[spec.engine]\n validator = get_validator_by_name(validator_name)\n if not validator:\n return json_error_response(\n \"No validator named {} found (configured for the {} engine)\".format(\n validator_name, spec.engine\n )\n )\n\n try:\n timeout = config[\"SQLLAB_VALIDATION_TIMEOUT\"]\n timeout_msg = f\"The query exceeded the {timeout} seconds timeout.\"\n with utils.timeout(seconds=timeout, error_message=timeout_msg):\n errors = validator.validate(sql, schema, mydb)\n payload = json.dumps(\n [err.to_dict() for err in errors],\n default=utils.pessimistic_json_iso_dttm_ser,\n ignore_nan=True,\n encoding=None,\n )\n return json_success(payload)\n except Exception as e:\n logger.exception(e)\n msg = _(\n f\"{validator.name} was unable to check your query.\\n\"\n \"Please recheck your query.\\n\"\n f\"Exception: {e}\"\n )\n # Return as a 400 if the database error message says we got a 4xx error\n if re.search(r\"([\\W]|^)4\\d{2}([\\W]|$)\", str(e)):\n return json_error_response(f\"{msg}\", status=400)\n else:\n return json_error_response(f\"{msg}\")\n\n def _sql_json_async(\n self,\n session: Session,\n rendered_query: str,\n query: Query,\n expand_data: bool,\n log_params: Optional[Dict[str, Any]] = None,\n ) -> str:\n \"\"\"\n Send SQL JSON query to celery workers\n\n :param session: SQLAlchemy session object\n :param rendered_query: the rendered query to perform by workers\n :param query: The query (SQLAlchemy) object\n :return: String JSON response\n \"\"\"\n logger.info(f\"Query {query.id}: Running query on a Celery worker\")\n # Ignore the celery future object and the request may time out.\n try:\n sql_lab.get_sql_results.delay(\n query.id,\n rendered_query,\n return_results=False,\n store_results=not query.select_as_cta,\n user_name=g.user.username if g.user else None,\n start_time=now_as_float(),\n expand_data=expand_data,\n log_params=log_params,\n )\n except Exception as e:\n logger.exception(f\"Query {query.id}: {e}\")\n msg = _(\n \"Failed to start remote query on a worker. 
\"\n \"Tell your administrator to verify the availability of \"\n \"the message queue.\"\n )\n query.status = QueryStatus.FAILED\n query.error_message = msg\n session.commit()\n return json_error_response(\"{}\".format(msg))\n resp = json_success(\n json.dumps(\n {\"query\": query.to_dict()},\n default=utils.json_int_dttm_ser,\n ignore_nan=True,\n ),\n status=202,\n )\n session.commit()\n return resp\n\n def _sql_json_sync(\n self,\n session: Session,\n rendered_query: str,\n query: Query,\n expand_data: bool,\n log_params: Optional[Dict[str, Any]] = None,\n ) -> str:\n \"\"\"\n Execute SQL query (sql json)\n\n :param rendered_query: The rendered query (included templates)\n :param query: The query SQL (SQLAlchemy) object\n :return: String JSON response\n \"\"\"\n try:\n timeout = config[\"SQLLAB_TIMEOUT\"]\n timeout_msg = f\"The query exceeded the {timeout} seconds timeout.\"\n store_results = (\n is_feature_enabled(\"SQLLAB_BACKEND_PERSISTENCE\")\n and not query.select_as_cta\n )\n with utils.timeout(seconds=timeout, error_message=timeout_msg):\n # pylint: disable=no-value-for-parameter\n data = sql_lab.get_sql_results(\n query.id,\n rendered_query,\n return_results=True,\n store_results=store_results,\n user_name=g.user.username if g.user else None,\n expand_data=expand_data,\n log_params=log_params,\n )\n\n payload = json.dumps(\n apply_display_max_row_limit(data),\n default=utils.pessimistic_json_iso_dttm_ser,\n ignore_nan=True,\n encoding=None,\n )\n except Exception as e:\n logger.exception(f\"Query {query.id}: {e}\")\n return json_error_response(f\"{{e}}\")\n if data.get(\"status\") == QueryStatus.FAILED:\n return json_error_response(payload=data)\n return json_success(payload)\n\n @has_access_api\n @expose(\"/sql_json/\", methods=[\"POST\"])\n @event_logger.log_this\n def sql_json(self):\n log_params = {\n \"user_agent\": cast(Optional[str], request.headers.get(\"USER_AGENT\"))\n }\n return self.sql_json_exec(request.json, log_params)\n\n def sql_json_exec(\n self, query_params: dict, log_params: Optional[Dict[str, Any]] = None\n ):\n \"\"\"Runs arbitrary sql and returns data as json\"\"\"\n # Collect Values\n database_id: int = cast(int, query_params.get(\"database_id\"))\n schema: str = cast(str, query_params.get(\"schema\"))\n sql: str = cast(str, query_params.get(\"sql\"))\n try:\n template_params: dict = json.loads(\n query_params.get(\"templateParams\") or \"{}\"\n )\n except json.JSONDecodeError:\n logger.warning(\n f\"Invalid template parameter {query_params.get('templateParams')}\"\n \" specified. Defaulting to empty dict\"\n )\n template_params = {}\n limit: int = query_params.get(\"queryLimit\") or app.config[\"SQL_MAX_ROW\"]\n async_flag: bool = cast(bool, query_params.get(\"runAsync\"))\n if limit < 0:\n logger.warning(\n f\"Invalid limit of {limit} specified. 
Defaulting to max limit.\"\n )\n limit = 0\n select_as_cta: bool = cast(bool, query_params.get(\"select_as_cta\"))\n tmp_table_name: str = cast(str, query_params.get(\"tmp_table_name\"))\n client_id: str = cast(\n str, query_params.get(\"client_id\") or utils.shortid()[:10]\n )\n sql_editor_id: str = cast(str, query_params.get(\"sql_editor_id\"))\n tab_name: str = cast(str, query_params.get(\"tab\"))\n status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING\n\n session = db.session()\n mydb = session.query(models.Database).get(database_id)\n if not mydb:\n return json_error_response(f\"Database with id {database_id} is missing.\")\n\n # Set tmp_table_name for CTA\n if select_as_cta and mydb.force_ctas_schema:\n tmp_table_name = f\"{mydb.force_ctas_schema}.{tmp_table_name}\"\n\n # Save current query\n query = Query(\n database_id=database_id,\n sql=sql,\n schema=schema,\n select_as_cta=select_as_cta,\n start_time=now_as_float(),\n tab_name=tab_name,\n status=status,\n sql_editor_id=sql_editor_id,\n tmp_table_name=tmp_table_name,\n user_id=g.user.get_id() if g.user else None,\n client_id=client_id,\n )\n try:\n session.add(query)\n session.flush()\n query_id = query.id\n session.commit() # shouldn't be necessary\n except SQLAlchemyError as e:\n logger.error(f\"Errors saving query details {e}\")\n session.rollback()\n raise Exception(_(\"Query record was not created as expected.\"))\n if not query_id:\n raise Exception(_(\"Query record was not created as expected.\"))\n\n logger.info(f\"Triggering query_id: {query_id}\")\n\n rejected_tables = security_manager.rejected_tables(sql, mydb, schema)\n if rejected_tables:\n query.status = QueryStatus.FAILED\n session.commit()\n return json_error_response(\n security_manager.get_table_access_error_msg(rejected_tables),\n link=security_manager.get_table_access_link(rejected_tables),\n status=403,\n )\n\n try:\n template_processor = get_template_processor(\n database=query.database, query=query\n )\n rendered_query = template_processor.process_template(\n query.sql, **template_params\n )\n except Exception as e:\n error_msg = utils.error_msg_from_exception(e)\n return json_error_response(\n f\"Query {query_id}: Template rendering failed: {error_msg}\"\n )\n\n # set LIMIT after template processing\n limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]\n query.limit = min(lim for lim in limits if lim is not None)\n\n # Flag for whether or not to expand data\n # (feature that will expand Presto row objects and arrays)\n expand_data: bool = cast(\n bool,\n is_feature_enabled(\"PRESTO_EXPAND_DATA\")\n and query_params.get(\"expand_data\"),\n )\n\n # Async request.\n if async_flag:\n return self._sql_json_async(\n session, rendered_query, query, expand_data, log_params\n )\n # Sync request.\n return self._sql_json_sync(\n session, rendered_query, query, expand_data, log_params\n )\n\n @has_access\n @expose(\"/csv/<client_id>\")\n @event_logger.log_this\n def csv(self, client_id):\n \"\"\"Download the query results as csv.\"\"\"\n logger.info(\"Exporting CSV file [{}]\".format(client_id))\n query = db.session.query(Query).filter_by(client_id=client_id).one()\n\n rejected_tables = security_manager.rejected_tables(\n query.sql, query.database, query.schema\n )\n if rejected_tables:\n flash(security_manager.get_table_access_error_msg(rejected_tables))\n return redirect(\"/\")\n blob = None\n if results_backend and query.results_key:\n logger.info(\n \"Fetching CSV from results backend \" \"[{}]\".format(query.results_key)\n 
)\n blob = results_backend.get(query.results_key)\n if blob:\n logger.info(\"Decompressing\")\n payload = utils.zlib_decompress(\n blob, decode=not results_backend_use_msgpack\n )\n obj = _deserialize_results_payload(\n payload, query, results_backend_use_msgpack\n )\n columns = [c[\"name\"] for c in obj[\"columns\"]]\n df = pd.DataFrame.from_records(obj[\"data\"], columns=columns)\n logger.info(\"Using pandas to convert to CSV\")\n csv = df.to_csv(index=False, **config[\"CSV_EXPORT\"])\n else:\n logger.info(\"Running a query to turn into CSV\")\n sql = query.select_sql or query.executed_sql\n df = query.database.get_df(sql, query.schema)\n # TODO(bkyryliuk): add compression=gzip for big files.\n csv = df.to_csv(index=False, **config[\"CSV_EXPORT\"])\n response = Response(csv, mimetype=\"text/csv\")\n response.headers[\n \"Content-Disposition\"\n ] = f\"attachment; filename={query.name}.csv\"\n event_info = {\n \"event_type\": \"data_export\",\n \"client_id\": client_id,\n \"row_count\": len(df.index),\n \"database\": query.database.name,\n \"schema\": query.schema,\n \"sql\": query.sql,\n \"exported_format\": \"csv\",\n }\n logger.info(\n f\"CSV exported: {repr(event_info)}\", extra={\"superset_event\": event_info}\n )\n return response\n\n @api\n @handle_api_exception\n @has_access\n @expose(\"/fetch_datasource_metadata\")\n @event_logger.log_this\n def fetch_datasource_metadata(self):\n datasource_id, datasource_type = request.args.get(\"datasourceKey\").split(\"__\")\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session\n )\n # Check if datasource exists\n if not datasource:\n return json_error_response(DATASOURCE_MISSING_ERR)\n\n # Check permission for datasource\n security_manager.assert_datasource_permission(datasource)\n return json_success(json.dumps(datasource.data))\n\n @has_access_api\n @expose(\"/queries/<last_updated_ms>\")\n def queries(self, last_updated_ms):\n \"\"\"\n Get the updated queries.\n\n :param last_updated_ms: unix time, milliseconds\n \"\"\"\n last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0\n return self.queries_exec(last_updated_ms_int)\n\n def queries_exec(self, last_updated_ms_int: int):\n stats_logger.incr(\"queries\")\n if not g.user.get_id():\n return json_error_response(\n \"Please login to access the queries.\", status=403\n )\n\n # UTC date time, same that is stored in the DB.\n last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)\n\n sql_queries = (\n db.session.query(Query)\n .filter(\n Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt\n )\n .all()\n )\n dict_queries = {q.client_id: q.to_dict() for q in sql_queries}\n return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser))\n\n @has_access\n @expose(\"/search_queries\")\n @event_logger.log_this\n def search_queries(self) -> Response:\n \"\"\"\n Search for previously run sqllab queries. 
Used for Sqllab Query Search\n page /superset/sqllab#search.\n\n Custom permission can_only_search_queries_owned restricts queries\n to only queries run by current user.\n\n :returns: Response with list of sql query dicts\n \"\"\"\n query = db.session.query(Query)\n if security_manager.can_access_all_queries():\n search_user_id = request.args.get(\"user_id\")\n elif (\n request.args.get(\"user_id\") is not None\n and request.args.get(\"user_id\") != g.user.get_user_id()\n ):\n return Response(status=403, mimetype=\"application/json\")\n else:\n search_user_id = g.user.get_user_id()\n database_id = request.args.get(\"database_id\")\n search_text = request.args.get(\"search_text\")\n status = request.args.get(\"status\")\n # From and To time stamp should be Epoch timestamp in seconds\n from_time = request.args.get(\"from\")\n to_time = request.args.get(\"to\")\n\n if search_user_id:\n # Filter on user_id\n query = query.filter(Query.user_id == search_user_id)\n\n if database_id:\n # Filter on db Id\n query = query.filter(Query.database_id == database_id)\n\n if status:\n # Filter on status\n query = query.filter(Query.status == status)\n\n if search_text:\n # Filter on search text\n query = query.filter(Query.sql.like(\"%{}%\".format(search_text)))\n\n if from_time:\n query = query.filter(Query.start_time > int(from_time))\n\n if to_time:\n query = query.filter(Query.start_time < int(to_time))\n\n query_limit = config[\"QUERY_SEARCH_LIMIT\"]\n sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all()\n\n dict_queries = [q.to_dict() for q in sql_queries]\n\n return Response(\n json.dumps(dict_queries, default=utils.json_int_dttm_ser),\n status=200,\n mimetype=\"application/json\",\n )\n\n @app.errorhandler(500)\n def show_traceback(self):\n return (\n render_template(\"superset/traceback.html\", error_msg=get_error_msg()),\n 500,\n )\n\n @expose(\"/welcome\")\n def welcome(self):\n \"\"\"Personalized welcome page\"\"\"\n if not g.user or not g.user.get_id():\n return redirect(appbuilder.get_url_for_login)\n\n welcome_dashboard_id = (\n db.session.query(UserAttribute.welcome_dashboard_id)\n .filter_by(user_id=g.user.get_id())\n .scalar()\n )\n if welcome_dashboard_id:\n return self.dashboard(str(welcome_dashboard_id))\n\n payload = {\n \"user\": bootstrap_user_data(g.user),\n \"common\": common_bootstrap_payload(),\n }\n\n return self.render_template(\n \"superset/welcome.html\",\n entry=\"welcome\",\n bootstrap_data=json.dumps(\n payload, default=utils.pessimistic_json_iso_dttm_ser\n ),\n )\n\n @has_access\n @expose(\"/profile/<username>/\")\n def profile(self, username):\n \"\"\"User profile page\"\"\"\n if not username and g.user:\n username = g.user.username\n\n user = (\n db.session.query(ab_models.User).filter_by(username=username).one_or_none()\n )\n if not user:\n abort(404, description=f\"User: {username} does not exist.\")\n\n payload = {\n \"user\": bootstrap_user_data(user, include_perms=True),\n \"common\": common_bootstrap_payload(),\n }\n\n return self.render_template(\n \"superset/basic.html\",\n title=_(\"%(user)s's profile\", user=username),\n entry=\"profile\",\n bootstrap_data=json.dumps(\n payload, default=utils.pessimistic_json_iso_dttm_ser\n ),\n )\n\n @staticmethod\n def _get_sqllab_payload(user_id: int) -> Dict[str, Any]:\n # send list of tab state ids\n tabs_state = (\n db.session.query(TabState.id, TabState.label)\n .filter_by(user_id=user_id)\n .all()\n )\n tab_state_ids = [tab_state[0] for tab_state in tabs_state]\n # return first active tab, or 
fallback to another one if no tab is active\n active_tab = (\n db.session.query(TabState)\n .filter_by(user_id=user_id)\n .order_by(TabState.active.desc())\n .first()\n )\n\n databases: Dict[int, Any] = {}\n queries: Dict[str, Any] = {}\n\n # These are unnecessary if sqllab backend persistence is disabled\n if is_feature_enabled(\"SQLLAB_BACKEND_PERSISTENCE\"):\n databases = {\n database.id: {\n k: v for k, v in database.to_json().items() if k in DATABASE_KEYS\n }\n for database in db.session.query(models.Database).all()\n }\n # return all user queries associated with existing SQL editors\n user_queries = (\n db.session.query(Query)\n .filter_by(user_id=user_id)\n .filter(Query.sql_editor_id.cast(Integer).in_(tab_state_ids))\n .all()\n )\n queries = {\n query.client_id: {k: v for k, v in query.to_dict().items()}\n for query in user_queries\n }\n\n return {\n \"defaultDbId\": config[\"SQLLAB_DEFAULT_DBID\"],\n \"common\": common_bootstrap_payload(),\n \"tab_state_ids\": tabs_state,\n \"active_tab\": active_tab.to_dict() if active_tab else None,\n \"databases\": databases,\n \"queries\": queries,\n }\n\n @has_access\n @expose(\"/sqllab\")\n def sqllab(self):\n \"\"\"SQL Editor\"\"\"\n payload = self._get_sqllab_payload(g.user.get_id())\n bootstrap_data = json.dumps(\n payload, default=utils.pessimistic_json_iso_dttm_ser\n )\n\n return self.render_template(\n \"superset/basic.html\", entry=\"sqllab\", bootstrap_data=bootstrap_data\n )\n\n @api\n @handle_api_exception\n @has_access_api\n @expose(\"/slice_query/<slice_id>/\")\n def slice_query(self, slice_id):\n \"\"\"\n This method exposes an API endpoint to\n get the database query string for this slice\n \"\"\"\n viz_obj = get_viz(slice_id)\n security_manager.assert_viz_permission(viz_obj)\n return self.get_query_string_response(viz_obj)\n\n @api\n @has_access_api\n @expose(\"/schemas_access_for_csv_upload\")\n def schemas_access_for_csv_upload(self):\n \"\"\"\n This method exposes an API endpoint to\n get the schema access control settings for csv upload in this database\n \"\"\"\n if not request.args.get(\"db_id\"):\n return json_error_response(\"No database is allowed for your csv upload\")\n\n db_id = int(request.args.get(\"db_id\"))\n database = db.session.query(models.Database).filter_by(id=db_id).one()\n try:\n schemas_allowed = database.get_schema_access_for_csv_upload()\n if (\n security_manager.database_access(database)\n or security_manager.all_datasource_access()\n ):\n return self.json_response(schemas_allowed)\n # the list schemas_allowed should not be empty here\n # and the list schemas_allowed_processed returned from security_manager\n # should not be empty either,\n # otherwise the database should have been filtered out\n # in CsvToDatabaseForm\n schemas_allowed_processed = security_manager.schemas_accessible_by_user(\n database, schemas_allowed, False\n )\n return self.json_response(schemas_allowed_processed)\n except Exception as e:\n logger.exception(e)\n return json_error_response(\n \"Failed to fetch schemas allowed for csv upload in this database! 
\"\n \"Please contact your Superset Admin!\"\n )\n\n\nclass CssTemplateModelView(SupersetModelView, DeleteMixin):\n datamodel = SQLAInterface(models.CssTemplate)\n include_route_methods = RouteMethod.CRUD_SET\n\n list_title = _(\"CSS Templates\")\n show_title = _(\"Show CSS Template\")\n add_title = _(\"Add CSS Template\")\n edit_title = _(\"Edit CSS Template\")\n\n list_columns = [\"template_name\"]\n edit_columns = [\"template_name\", \"css\"]\n add_columns = edit_columns\n label_columns = {\"template_name\": _(\"Template Name\")}\n\n\nclass CssTemplateAsyncModelView(CssTemplateModelView):\n include_route_methods = {RouteMethod.API_READ}\n list_columns = [\"template_name\", \"css\"]\n\n\[email protected]_request\ndef apply_http_headers(response: Response):\n \"\"\"Applies the configuration's http headers to all responses\"\"\"\n\n # HTTP_HEADERS is deprecated, this provides backwards compatibility\n response.headers.extend(\n {**config[\"OVERRIDE_HTTP_HEADERS\"], **config[\"HTTP_HEADERS\"]}\n )\n\n for k, v in config[\"DEFAULT_HTTP_HEADERS\"].items():\n if k not in response.headers:\n response.headers[k] = v\n return response\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
amarildolikmeta/oac-explore
[ "e3d63992a4ff33c8df593941f498457e94f81eb8" ]
[ "scripts/launch_test.py" ]
[ "import json\nimport sys\nsys.path.append(\"../\")\nfrom trainer.particle_trainer import ParticleTrainer\nfrom trainer.gaussian_trainer import GaussianTrainer\nfrom trainer.trainer import SACTrainer\nimport numpy as np\nimport torch\nfrom main import env_producer, get_policy_producer, get_q_producer\nfrom utils.pythonplusplus import load_gzip_pickle\n\n\nts = '1584884279.5007188'\nts = '1589352957.4422379'\niter = 190\npath = '../data/point/sac_/' + ts\nts = '1590677750.0582957'\npath = '../data/point/mean_update_counts/p-oac_/' + ts\nts = '1595343877.9346888'\npath = '../data/point/hard/terminal/ddpgcounts/p-oac_/no_bias/' + ts\n\nrestore = True\n\nvariant = json.load(open(path + '/variant.json', 'r'))\ndomain = variant['domain']\nseed = variant['seed']\nr_max = variant['r_max']\nensemble = variant['ensemble']\ndelta = variant['delta']\nn_estimators = variant['n_estimators']\nif seed == 0:\n np.random.seed()\n seed = np.random.randint(0, 1000000)\ntorch.manual_seed(seed)\nnp.random.seed(seed)\nenv_args = {}\nif domain in ['riverswim']:\n env_args['dim'] = variant['dim']\nif domain in ['point']:\n env_args['difficulty'] = variant['difficulty']\n env_args['max_state'] = variant['max_state']\n env_args['clip_state'] = variant['clip_state']\n env_args['terminal'] = variant['terminal']\n\nexpl_env = env_producer(domain, seed, **env_args)\neval_env = env_producer(domain, seed * 10 + 1, **env_args)\nobs_dim = expl_env.observation_space.low.size\naction_dim = expl_env.action_space.low.size\n\n# Get producer function for policy and value functions\nM = variant['layer_size']\nN = variant['num_layers']\n\nalg = variant['alg']\n\nif alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac'] and variant['share_layers']:\n output_size = n_estimators\n n_estimators = 1\nelse:\n output_size = 1\nob = expl_env.reset()\nprint(ob)\nq_producer = get_q_producer(obs_dim, action_dim, hidden_sizes=[M] * N, output_size=output_size)\npolicy_producer = get_policy_producer(\n obs_dim, action_dim, hidden_sizes=[M] * N)\nq_min = variant['r_min'] / (1 - variant['trainer_kwargs']['discount'])\nq_max = variant['r_max'] / (1 - variant['trainer_kwargs']['discount'])\nalg_to_trainer = {\n 'sac': SACTrainer,\n 'oac': SACTrainer,\n 'p-oac': ParticleTrainer,\n 'g-oac': GaussianTrainer\n}\ntrainer = alg_to_trainer[variant['alg']]\n\nkwargs ={ }\nif alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac']:\n n_estimators = variant['n_estimators']\n kwargs = dict(\n n_estimators=n_estimators,\n delta=variant['delta'],\n q_min=q_min,\n q_max=q_max,\n ensemble=variant['ensemble'],\n n_policies=variant['n_policies'],\n )\nkwargs.update(dict(\n policy_producer=policy_producer,\n q_producer=q_producer,\n action_space=expl_env.action_space,\n ))\nprint(kwargs)\nkwargs.update(variant['trainer_kwargs'])\ntrainer = trainer(**kwargs)\n# try:\n# experiment = path + '/best.zip_pkl'\n# exp = load_gzip_pickle(experiment)\n# print(exp['epoch'])\n# trainer.restore_from_snapshot(exp['trainer'])\n# except:\nexperiment = path + '/params.zip_pkl'\nexp = load_gzip_pickle(experiment)\nprint(exp['epoch'])\ntrainer.restore_from_snapshot(exp['trainer'])\n\nfor i in range(10):\n s = expl_env.reset()\n done = False\n ret = 0\n t = 0\n while not done and t < 400:\n expl_env.render()\n if hasattr(trainer, 'target_policy'):\n a, agent_info = trainer.target_policy.get_action(s, deterministic=True)\n else:\n a, agent_info = trainer.policy.get_action(s, deterministic=True)\n s, r, done, _ = expl_env.step(a)\n t += 1\n ret += r\n expl_env.render()\n print(\"Return: \", ret)\n input()" 
]
[ [ "torch.manual_seed", "numpy.random.seed", "numpy.random.randint" ] ]
JustinWingChungHui/okkindred_facial_recognition
[ "e6744e604d0bf25f9024a2ef2ba7ca9d0760c8b1" ]
[ "train_face_recognition.py" ]
[ "# https://github.com/ageitgey/face_recognition/blob/master/examples/face_recognition_knn.py\n\nimport math\nimport os\nimport pickle\nfrom PIL import Image as PilImage\nfrom sklearn import neighbors\nfrom models import Person, Image, Tag, FaceModel\nfrom secrets import TRAIN_FACE_RECOGNITION_TEMP_DIR\nfrom file_downloader import download_file, clear_directory\nimport face_recognition\n\n\ndef get_file_for_tag(tag, image, session, dir_name):\n '''\n Gets file for tag and image\n '''\n print(' = Processing Tag and Image =')\n print(' tag.id: {}'.format(tag.id))\n print(' image.id: {}'.format(image.id))\n\n file = download_file(dir_name, image.large_thumbnail)\n\n print(' Opening Image')\n original = PilImage.open(file)\n\n print(' Cropping image')\n left = tag.x1 * image.large_thumbnail_width\n right = tag.x2 * image.large_thumbnail_width\n top = tag.y1 * image.large_thumbnail_height\n bottom = tag.y2 * image.large_thumbnail_height\n\n cropped = original.crop((left, top, right, bottom))\n cropped.save(file)\n\n return file\n\n\n\ndef process_person(person, session, X, y):\n '''\n Processes images for one person\n '''\n print(' == Processing person name: {0} id: {1} =='.format(person.name, person.id))\n dir_name = os.path.join(TRAIN_FACE_RECOGNITION_TEMP_DIR, str(person.id))\n\n print(' Creating directory {}'.format(dir_name))\n os.mkdir(dir_name)\n\n files = []\n\n if person.large_thumbnail:\n print(' Getting profile photo'.format(dir_name))\n files.append(download_file(dir_name, person.large_thumbnail))\n\n print(' Get all face detected tags for person')\n tags_and_images = session.query(Tag, Image). \\\n filter(Tag.person_id == person.id). \\\n filter(Tag.face_detected == True). \\\n filter(Tag.image_id == Image.id).all()\n\n print(' Total number of tags: {}'.format(len(tags_and_images)))\n\n for tag, image in tags_and_images:\n files.append(get_file_for_tag(tag, image, session, dir_name))\n\n print(' Process Images')\n for file in files:\n process_file(file, X, y, person.id)\n\n\ndef process_file(file, X, y, person_id):\n print(' Creating face encoding for {}'.format(file))\n im = face_recognition.load_image_file(file)\n face_bounding_boxes = face_recognition.face_locations(im)\n\n # Add face encoding for current image to the training set\n if len(face_bounding_boxes) == 1:\n print(' Adding face to model')\n X.append(face_recognition.face_encodings(im, known_face_locations=face_bounding_boxes)[0])\n y.append(person_id)\n else:\n print(' XXX No Face Found!!! 
XXX')\n\n\ndef process_family(family_id, session):\n '''\n Creates a K Nearest neighbour model for a family\n '''\n print('')\n print('===== Processing Family_id: {} ====='.format(family_id))\n print('Clearing working directory')\n clear_directory(TRAIN_FACE_RECOGNITION_TEMP_DIR)\n\n face_model = FaceModel(family_id = family_id)\n\n print('Get all people for family')\n people = session.query(Person).filter(Person.family_id == family_id).all()\n print('Total number of people: {}'.format(len(people)))\n\n\n X = []\n y = []\n\n\n for person in people:\n process_person(person, session, X, y)\n\n if (len(X) > 0):\n n_neighbors = int(round(math.sqrt(len(X))))\n print('Setting n_neighbors to {}'.format(n_neighbors))\n\n print('Creating and training the KNN classifier')\n knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm='ball_tree', weights='distance')\n knn_clf.fit(X, y)\n\n print('y:')\n print(y)\n\n print('Pickling and saving to db')\n face_model.fit_data_faces = pickle.dumps(X)\n face_model.fit_data_person_ids = pickle.dumps(y)\n face_model.n_neighbors = n_neighbors\n face_model.trained_knn_model = pickle.dumps(knn_clf)\n\n session.add(face_model)\n session.commit()\n\n else:\n print('Not enough data to create model')\n\n\n\n#print('#############################################')\n#print('')\n\n#print('Connecting to db')\n# mysql+mysqldb://<user>:<password>@<host>/<dbname>\n#connection_string = 'mysql+mysqldb://{0}:{1}@{2}/{3}'.format(DATABASE['USER'],\n# DATABASE['PASSWORD'],\n# DATABASE['HOST'],\n# DATABASE['NAME'])\n#engine = create_engine(connection_string)\n#Base.metadata.bind = engine\n#DBSession = sessionmaker()\n#DBSession.bind = engine\n#session = DBSession()\n\n#print('Get all families')\n#families = session.query(Family).all()\n#print('Total number of families: {}'.format(len(families)))\n\n#for family in families:\n# process_family(family.id, session)\n\n\n" ]
[ [ "sklearn.neighbors.KNeighborsClassifier" ] ]
ruppinlab/tcga-microbiome-prediction
[ "e7923b94738f9bd1b7862bb109002554430d9ace" ]
[ "sklearn_extensions/model_selection/_search.py" ]
[ "\"\"\"\nThe :mod:`sklearn_extesions.model_selection._search` includes utilities to\nfine-tune the parameters of an estimator.\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>,\n# Gael Varoquaux <[email protected]>\n# Andreas Mueller <[email protected]>\n# Olivier Grisel <[email protected]>\n# Raghav RV <[email protected]>\n# Leandro Cruz Hermida <[email protected]>\n# License: BSD 3 clause\n\nfrom abc import abstractmethod\nfrom collections import defaultdict\nfrom collections.abc import Sequence\nfrom functools import partial\nfrom itertools import product\nimport numbers\nimport time\nimport warnings\n\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom scipy.stats import rankdata\n\nfrom sklearn.base import is_classifier, clone\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.model_selection import GridSearchCV, ParameterGrid\nfrom sklearn.model_selection._search import BaseSearchCV\nfrom sklearn.model_selection._split import check_cv\nfrom sklearn.model_selection._validation import _aggregate_score_dicts\nfrom sklearn.utils.fixes import MaskedArray\nfrom sklearn.utils.metaestimators import if_delegate_has_method\nfrom sklearn.utils.validation import (indexable, check_is_fitted,\n _check_fit_params)\n\nfrom ..metrics._scorer import _check_multimetric_scoring\nfrom ..utils.metaestimators import check_routing\nfrom ._validation import _fit_and_score\n\n\n__all__ = ['ExtendedGridSearchCV']\n\n\ndef _check_param_grid(param_grid):\n if hasattr(param_grid, 'items'):\n param_grid = [param_grid]\n\n for p in param_grid:\n for name, v in p.items():\n if isinstance(v, np.ndarray) and v.ndim > 2:\n raise ValueError(\"Parameter array should be one- or \"\n \"two-dimensional.\")\n\n if (isinstance(v, str) or\n not isinstance(v, (np.ndarray, Sequence))):\n raise ValueError(\"Parameter values for parameter ({0}) need \"\n \"to be a sequence(but not a string) or\"\n \" np.ndarray.\".format(name))\n\n if len(v) == 0:\n raise ValueError(\"Parameter values for parameter ({0}) need \"\n \"to be a non-empty sequence.\".format(name))\n\n\nclass ExtendedBaseSearchCV(BaseSearchCV):\n \"\"\"Abstract base class for hyper parameter search with cross-validation.\n \"\"\"\n\n @abstractmethod\n def __init__(self, estimator, scoring=None, n_jobs=None, iid='deprecated',\n refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',\n error_score=np.nan, return_train_score=True,\n param_routing=None):\n\n self.scoring = scoring\n self.estimator = estimator\n self.n_jobs = n_jobs\n self.iid = iid\n self.refit = refit\n self.cv = cv\n self.verbose = verbose\n self.pre_dispatch = pre_dispatch\n self.error_score = error_score\n self.return_train_score = return_train_score\n self.param_routing = param_routing\n self.router = check_routing(\n self.param_routing, ['estimator', 'cv', 'scoring'],\n {'cv': {'groups': 'groups', 'weights': 'group_weights'},\n 'estimator': ['-groups', '-group_weights']})\n\n @property\n def _estimator_type(self):\n return self.estimator._estimator_type\n\n @property\n def _pairwise(self):\n # allows cross-validation to see 'precomputed' metrics\n return getattr(self.estimator, '_pairwise', False)\n\n def set_params(self, **params):\n super().set_params(**params)\n if 'param_routing' in params:\n self.router = check_routing(\n self.param_routing, ['estimator', 'cv', 'scoring'],\n {'cv': {'groups': 'groups', 'weights': 'group_weights'},\n 'estimator': ['-groups', '-group_weights']})\n return self\n\n def score(self, X, y=None, **score_params):\n \"\"\"Returns the 
score on the given data, if the estimator has been refit.\n\n This uses the score defined by ``scoring`` where provided, and the\n ``best_estimator_.score`` method otherwise.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like of shape (n_samples, n_output) or (n_samples,), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n Returns\n -------\n score : float\n \"\"\"\n self._check_is_fitted('score')\n if self.scorer_ is None:\n raise ValueError(\"No score function explicitly defined, \"\n \"and the estimator doesn't provide one %s\"\n % self.best_estimator_)\n score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_\n return score(self.best_estimator_, X, y, **score_params)\n\n def _check_is_fitted(self, method_name):\n if not self.refit:\n raise NotFittedError('This %s instance was initialized '\n 'with refit=False. %s is '\n 'available only after refitting on the best '\n 'parameters. You can refit an estimator '\n 'manually using the ``best_params_`` '\n 'attribute'\n % (type(self).__name__, method_name))\n else:\n check_is_fitted(self)\n\n @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))\n def predict(self, X, **predict_params):\n \"\"\"Call predict on the estimator with the best found parameters.\n\n Only available if ``refit=True`` and the underlying estimator supports\n ``predict``.\n\n Parameters\n ----------\n X : indexable, length n_samples\n Must fulfill the input assumptions of the\n underlying estimator.\n\n \"\"\"\n self._check_is_fitted('predict')\n return self.best_estimator_.predict(X, **predict_params)\n\n @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))\n def predict_proba(self, X, **predict_params):\n \"\"\"Call predict_proba on the estimator with the best found parameters.\n\n Only available if ``refit=True`` and the underlying estimator supports\n ``predict_proba``.\n\n Parameters\n ----------\n X : indexable, length n_samples\n Must fulfill the input assumptions of the\n underlying estimator.\n\n \"\"\"\n self._check_is_fitted('predict_proba')\n return self.best_estimator_.predict_proba(X, **predict_params)\n\n @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))\n def predict_log_proba(self, X, **predict_params):\n \"\"\"Call predict_log_proba on the estimator with the best found parameters.\n\n Only available if ``refit=True`` and the underlying estimator supports\n ``predict_log_proba``.\n\n Parameters\n ----------\n X : indexable, length n_samples\n Must fulfill the input assumptions of the\n underlying estimator.\n\n \"\"\"\n self._check_is_fitted('predict_log_proba')\n return self.best_estimator_.predict_log_proba(X, **predict_params)\n\n @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))\n def decision_function(self, X, **predict_params):\n \"\"\"Call decision_function on the estimator with the best found parameters.\n\n Only available if ``refit=True`` and the underlying estimator supports\n ``decision_function``.\n\n Parameters\n ----------\n X : indexable, length n_samples\n Must fulfill the input assumptions of the\n underlying estimator.\n\n \"\"\"\n self._check_is_fitted('decision_function')\n return self.best_estimator_.decision_function(X, **predict_params)\n\n @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))\n def transform(self, X, **transform_params):\n 
\"\"\"Call transform on the estimator with the best found parameters.\n\n Only available if the underlying estimator supports ``transform`` and\n ``refit=True``.\n\n Parameters\n ----------\n X : indexable, length n_samples\n Must fulfill the input assumptions of the\n underlying estimator.\n\n \"\"\"\n self._check_is_fitted('transform')\n return self.best_estimator_.transform(X, **transform_params)\n\n @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))\n def inverse_transform(self, Xt, **transform_params):\n \"\"\"Call inverse_transform on the estimator with the best found params.\n\n Only available if the underlying estimator implements\n ``inverse_transform`` and ``refit=True``.\n\n Parameters\n ----------\n Xt : indexable, length n_samples\n Must fulfill the input assumptions of the\n underlying estimator.\n\n \"\"\"\n self._check_is_fitted('inverse_transform')\n return self.best_estimator_.inverse_transform(Xt, **transform_params)\n\n @property\n def classes_(self):\n self._check_is_fitted(\"classes_\")\n return self.best_estimator_.classes_\n\n def _run_search(self, evaluate_candidates):\n \"\"\"Repeatedly calls `evaluate_candidates` to conduct a search.\n\n This method, implemented in sub-classes, makes it possible to\n customize the the scheduling of evaluations: GridSearchCV and\n RandomizedSearchCV schedule evaluations for their whole parameter\n search space at once but other more sequential approaches are also\n possible: for instance is possible to iteratively schedule evaluations\n for new regions of the parameter search space based on previously\n collected evaluation results. This makes it possible to implement\n Bayesian optimization or more generally sequential model-based\n optimization by deriving from the BaseSearchCV abstract base class.\n\n Parameters\n ----------\n evaluate_candidates : callable\n This callback accepts a list of candidates, where each candidate is\n a dict of parameter settings. It returns a dict of all results so\n far, formatted like ``cv_results_``.\n\n Examples\n --------\n\n ::\n\n def _run_search(self, evaluate_candidates):\n 'Try C=0.1 only if C=1 is better than C=10'\n all_results = evaluate_candidates([{'C': 1}, {'C': 10}])\n score = all_results['mean_test_score']\n if score[0] < score[1]:\n evaluate_candidates([{'C': 0.1}])\n \"\"\"\n raise NotImplementedError(\"_run_search not implemented.\")\n\n def fit(self, X, y=None, **fit_params):\n \"\"\"Run fit with all sets of parameters.\n\n Parameters\n ----------\n\n X : array-like of shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like of shape (n_samples, n_output) or (n_samples,), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n groups : array-like, with shape (n_samples,), optional\n Group labels for the samples used while splitting the dataset into\n train/test set. 
Only used in conjunction with a \"Group\" :term:`cv`\n instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).\n\n **fit_params : dict of string -> object\n Parameters passed to the ``fit`` method of the estimator\n \"\"\"\n estimator = self.estimator\n cv = check_cv(self.cv, y, classifier=is_classifier(estimator))\n\n scorers, self.multimetric_ = _check_multimetric_scoring(\n self.estimator, scoring=self.scoring)\n\n if self.multimetric_:\n if self.refit is not False and (\n not isinstance(self.refit, str) or\n # This will work for both dict / list (tuple)\n self.refit not in scorers) and not callable(self.refit):\n raise ValueError(\"For multi-metric scoring, the parameter \"\n \"refit must be set to a scorer key or a \"\n \"callable to refit an estimator with the \"\n \"best parameter setting on the whole \"\n \"data and make the best_* attributes \"\n \"available for that metric. If this is \"\n \"not needed, refit should be set to \"\n \"False explicitly. %r was passed.\"\n % self.refit)\n else:\n refit_metric = self.refit\n else:\n refit_metric = 'score'\n\n # so feature metadata/properties can work\n feature_params = {k: v for k, v in fit_params.items()\n if k == 'feature_meta'}\n fit_params = {k: v for k, v in fit_params.items()\n if k != 'feature_meta'}\n\n X, y, *fit_params_values = indexable(X, y, *fit_params.values())\n fit_params = dict(zip(fit_params.keys(), fit_params_values))\n fit_params = _check_fit_params(X, fit_params)\n\n (fit_params, cv_params, score_params), remainder = (\n self.router(fit_params))\n if remainder:\n raise TypeError('fit() got unexpected keyword arguments %r'\n % sorted(remainder))\n\n n_splits = cv.get_n_splits(X, y, **cv_params)\n\n base_estimator = clone(self.estimator)\n\n parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n pre_dispatch=self.pre_dispatch)\n\n fit_and_score_kwargs = dict(scorer=scorers,\n fit_params=fit_params,\n score_params=score_params,\n feature_params=feature_params,\n return_train_score=self.return_train_score,\n return_n_test_samples=True,\n return_times=True,\n return_parameters=False,\n error_score=self.error_score,\n verbose=self.verbose)\n results = {}\n with parallel:\n all_candidate_params = []\n all_out = []\n\n def evaluate_candidates(candidate_params):\n candidate_params = list(candidate_params)\n n_candidates = len(candidate_params)\n\n if self.verbose > 0:\n print(\"Fitting {0} folds for each of {1} candidates,\"\n \" totalling {2} fits\".format(\n n_splits, n_candidates, n_candidates * n_splits))\n\n out = parallel(delayed(_fit_and_score)(clone(base_estimator),\n X, y,\n train=train, test=test,\n parameters=parameters,\n **fit_and_score_kwargs)\n for parameters, (train, test)\n in product(candidate_params,\n cv.split(X, y, **cv_params)))\n\n if len(out) < 1:\n raise ValueError('No fits were performed. '\n 'Was the CV iterator empty? '\n 'Were there no candidates?')\n elif len(out) != n_candidates * n_splits:\n raise ValueError('cv.split and cv.get_n_splits returned '\n 'inconsistent results. 
Expected {} '\n 'splits, got {}'\n .format(n_splits,\n len(out) // n_candidates))\n\n all_candidate_params.extend(candidate_params)\n all_out.extend(out)\n\n nonlocal results\n results = self._format_results(\n all_candidate_params, scorers, n_splits, all_out)\n return results\n\n self._run_search(evaluate_candidates)\n\n # For multi-metric evaluation, store the best_index_, best_params_ and\n # best_score_ iff refit is one of the scorer names\n # In single metric evaluation, refit_metric is \"score\"\n if self.refit or not self.multimetric_:\n # If callable, refit is expected to return the index of the best\n # parameter set.\n if callable(self.refit):\n self.best_index_ = self.refit(results)\n if not isinstance(self.best_index_, numbers.Integral):\n raise TypeError('best_index_ returned is not an integer')\n if (self.best_index_ < 0 or\n self.best_index_ >= len(results[\"params\"])):\n raise IndexError('best_index_ index out of range')\n else:\n self.best_index_ = results[\"rank_test_%s\"\n % refit_metric].argmin()\n self.best_score_ = results[\"mean_test_%s\" % refit_metric][\n self.best_index_]\n self.best_params_ = results[\"params\"][self.best_index_]\n\n if self.refit:\n # we clone again after setting params in case some\n # of the params are estimators as well.\n self.best_estimator_ = clone(clone(base_estimator).set_params(\n **self.best_params_))\n refit_start_time = time.time()\n if y is not None:\n self.best_estimator_.fit(X, y, **fit_params, **feature_params)\n else:\n self.best_estimator_.fit(X, **fit_params, **feature_params)\n refit_end_time = time.time()\n self.refit_time_ = refit_end_time - refit_start_time\n\n # Store the only scorer not as a dict for single metric evaluation\n self.scorer_ = scorers if self.multimetric_ else scorers['score']\n\n self.cv_results_ = results\n self.n_splits_ = n_splits\n\n return self\n\n def _format_results(self, candidate_params, scorers, n_splits, out):\n n_candidates = len(candidate_params)\n\n # if one choose to see train score, \"out\" will contain train score info\n if self.return_train_score:\n (train_score_dicts, test_score_dicts, test_sample_counts, fit_time,\n score_time) = zip(*out)\n else:\n (test_score_dicts, test_sample_counts, fit_time,\n score_time) = zip(*out)\n\n # test_score_dicts and train_score dicts are lists of dictionaries and\n # we make them into dict of lists\n test_scores = _aggregate_score_dicts(test_score_dicts)\n if self.return_train_score:\n train_scores = _aggregate_score_dicts(train_score_dicts)\n\n results = {}\n\n def _store(key_name, array, weights=None, splits=False, rank=False):\n \"\"\"A small helper to store the scores/times to the cv_results_\"\"\"\n # When iterated first by splits, then by parameters\n # We want `array` to have `n_candidates` rows and `n_splits` cols.\n array = np.array(array, dtype=np.float64).reshape(n_candidates,\n n_splits)\n if splits:\n for split_i in range(n_splits):\n # Uses closure to alter the results\n results[\"split%d_%s\"\n % (split_i, key_name)] = array[:, split_i]\n\n array_means = np.average(array, axis=1, weights=weights)\n results['mean_%s' % key_name] = array_means\n # Weighted std is not directly available in numpy\n array_stds = np.sqrt(np.average((array -\n array_means[:, np.newaxis]) ** 2,\n axis=1, weights=weights))\n results['std_%s' % key_name] = array_stds\n\n if rank:\n results[\"rank_%s\" % key_name] = np.asarray(\n rankdata(-array_means, method='min'), dtype=np.int32)\n\n _store('fit_time', fit_time)\n _store('score_time', score_time)\n # Use one 
MaskedArray and mask all the places where the param is not\n # applicable for that candidate. Use defaultdict as each candidate may\n # not contain all the params\n param_results = defaultdict(partial(MaskedArray,\n np.empty(n_candidates,),\n mask=True,\n dtype=object))\n for cand_i, params in enumerate(candidate_params):\n for name, value in params.items():\n # An all masked empty array gets created for the key\n # `\"param_%s\" % name` at the first occurrence of `name`.\n # Setting the value at an index also unmasks that index\n param_results[\"param_%s\" % name][cand_i] = value\n\n results.update(param_results)\n # Store a list of param dicts at the key 'params'\n results['params'] = candidate_params\n\n # NOTE test_sample counts (weights) remain the same for all candidates\n test_sample_counts = np.array(test_sample_counts[:n_splits],\n dtype=np.int)\n\n if self.iid != 'deprecated':\n warnings.warn(\n \"The parameter 'iid' is deprecated in 0.22 and will be \"\n \"removed in 0.24.\", FutureWarning\n )\n iid = self.iid\n else:\n iid = False\n\n for scorer_name in scorers.keys():\n # Computed the (weighted) mean and std for test scores alone\n _store('test_%s' % scorer_name, test_scores[scorer_name],\n splits=True, rank=True,\n weights=test_sample_counts if iid else None)\n if self.return_train_score:\n _store('train_%s' % scorer_name, train_scores[scorer_name],\n splits=True)\n\n return results\n\n\nclass ExtendedGridSearchCV(ExtendedBaseSearchCV, GridSearchCV):\n \"\"\"Exhaustive search over specified parameter values for an estimator.\n\n Important members are fit, predict.\n\n GridSearchCV implements a \"fit\" and a \"score\" method.\n It also implements \"predict\", \"predict_proba\", \"decision_function\",\n \"transform\" and \"inverse_transform\" if they are implemented in the\n estimator used.\n\n The parameters of the estimator used to apply these methods are optimized\n by cross-validated grid-search over a parameter grid.\n\n Read more in the :ref:`User Guide <grid_search>`.\n\n Parameters\n ----------\n estimator : estimator object.\n This is assumed to implement the scikit-learn estimator interface.\n Either estimator needs to provide a ``score`` function,\n or ``scoring`` must be passed.\n\n param_grid : dict or list of dictionaries\n Dictionary with parameters names (string) as keys and lists of\n parameter settings to try as values, or a list of such\n dictionaries, in which case the grids spanned by each dictionary\n in the list are explored. This enables searching over any sequence\n of parameter settings.\n\n scoring : string, callable, list/tuple, dict or None, default: None\n A single string (see :ref:`scoring_parameter`) or a callable\n (see :ref:`scoring`) to evaluate the predictions on the test set.\n\n For evaluating multiple metrics, either give a list of (unique) strings\n or a dict with names as keys and callables as values.\n\n NOTE that when using custom scorers, each scorer should return a single\n value. Metric functions returning a list/array of values can be wrapped\n into multiple scorers that return one value each.\n\n See :ref:`multimetric_grid_search` for an example.\n\n If None, the estimator's score method is used.\n\n n_jobs : int or None, optional (default=None)\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\n for more details.\n\n pre_dispatch : int, or string, optional\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n\n - None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n\n - An int, giving the exact number of total jobs that are\n spawned\n\n - A string, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n\n iid : boolean, default=False\n If True, return the average score across folds, weighted by the number\n of samples in each test set. In this case, the data is assumed to be\n identically distributed across the folds, and the loss minimized is\n the total loss per sample, and not the mean loss across the folds.\n\n .. deprecated:: 0.22\n Parameter ``iid`` is deprecated in 0.22 and will be removed in 0.24\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 5-fold cross validation,\n - integer, to specify the number of folds in a `(Stratified)KFold`,\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the estimator is a classifier and ``y`` is\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\n other cases, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. versionchanged:: 0.22\n ``cv`` default value if None changed from 3-fold to 5-fold.\n\n refit : boolean, string, or callable, default=True\n Refit an estimator using the best found parameters on the whole\n dataset.\n\n For multiple metric evaluation, this needs to be a string denoting the\n scorer that would be used to find the best parameters for refitting\n the estimator at the end.\n\n Where there are considerations other than maximum score in\n choosing a best estimator, ``refit`` can be set to a function which\n returns the selected ``best_index_`` given ``cv_results_``. In that\n case, the ``best_estimator_`` and ``best_parameters_`` will be set\n according to the returned ``best_index_`` while the ``best_score_``\n attribute will not be available.\n\n The refitted estimator is made available at the ``best_estimator_``\n attribute and permits using ``predict`` directly on this\n ``GridSearchCV`` instance.\n\n Also for multiple metric evaluation, the attributes ``best_index_``,\n ``best_score_`` and ``best_params_`` will only be available if\n ``refit`` is set and all of them will be determined w.r.t this specific\n scorer.\n\n See ``scoring`` parameter to know more about multiple metric\n evaluation.\n\n .. versionchanged:: 0.20\n Support for callable added.\n\n verbose : integer\n Controls the verbosity: the higher, the more messages.\n\n error_score : 'raise' or numeric\n Value to assign to the score if an error occurs in estimator fitting.\n If set to 'raise', the error is raised. If a numeric value is given,\n FitFailedWarning is raised. This parameter does not affect the refit\n step, which will always raise the error. 
Default is ``np.nan``.\n\n return_train_score : boolean, default=False\n If ``False``, the ``cv_results_`` attribute will not include training\n scores.\n Computing training scores is used to get insights on how different\n parameter settings impact the overfitting/underfitting trade-off.\n However computing the scores on the training set can be computationally\n expensive and is not strictly required to select the parameters that\n yield the best generalization performance.\n\n\n Examples\n --------\n >>> from sklearn import svm, datasets\n >>> from sklearn.model_selection import GridSearchCV\n >>> iris = datasets.load_iris()\n >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}\n >>> svc = svm.SVC()\n >>> clf = GridSearchCV(svc, parameters)\n >>> clf.fit(iris.data, iris.target)\n GridSearchCV(estimator=SVC(),\n param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')})\n >>> sorted(clf.cv_results_.keys())\n ['mean_fit_time', 'mean_score_time', 'mean_test_score',...\n 'param_C', 'param_kernel', 'params',...\n 'rank_test_score', 'split0_test_score',...\n 'split2_test_score', ...\n 'std_fit_time', 'std_score_time', 'std_test_score']\n\n Attributes\n ----------\n cv_results_ : dict of numpy (masked) ndarrays\n A dict with keys as column headers and values as columns, that can be\n imported into a pandas ``DataFrame``.\n\n For instance the below given table\n\n +------------+-----------+------------+-----------------+---+---------+\n |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|\n +============+===========+============+=================+===+=========+\n | 'poly' | -- | 2 | 0.80 |...| 2 |\n +------------+-----------+------------+-----------------+---+---------+\n | 'poly' | -- | 3 | 0.70 |...| 4 |\n +------------+-----------+------------+-----------------+---+---------+\n | 'rbf' | 0.1 | -- | 0.80 |...| 3 |\n +------------+-----------+------------+-----------------+---+---------+\n | 'rbf' | 0.2 | -- | 0.93 |...| 1 |\n +------------+-----------+------------+-----------------+---+---------+\n\n will be represented by a ``cv_results_`` dict of::\n\n {\n 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],\n mask = [False False False False]...)\n 'param_gamma': masked_array(data = [-- -- 0.1 0.2],\n mask = [ True True False False]...),\n 'param_degree': masked_array(data = [2.0 3.0 -- --],\n mask = [False False True True]...),\n 'split0_test_score' : [0.80, 0.70, 0.80, 0.93],\n 'split1_test_score' : [0.82, 0.50, 0.70, 0.78],\n 'mean_test_score' : [0.81, 0.60, 0.75, 0.85],\n 'std_test_score' : [0.01, 0.10, 0.05, 0.08],\n 'rank_test_score' : [2, 4, 3, 1],\n 'split0_train_score' : [0.80, 0.92, 0.70, 0.93],\n 'split1_train_score' : [0.82, 0.55, 0.70, 0.87],\n 'mean_train_score' : [0.81, 0.74, 0.70, 0.90],\n 'std_train_score' : [0.01, 0.19, 0.00, 0.03],\n 'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],\n 'std_fit_time' : [0.01, 0.02, 0.01, 0.01],\n 'mean_score_time' : [0.01, 0.06, 0.04, 0.04],\n 'std_score_time' : [0.00, 0.00, 0.00, 0.01],\n 'params' : [{'kernel': 'poly', 'degree': 2}, ...],\n }\n\n NOTE\n\n The key ``'params'`` is used to store a list of parameter\n settings dicts for all the parameter candidates.\n\n The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and\n ``std_score_time`` are all in seconds.\n\n For multi-metric evaluation, the scores for all the scorers are\n available in the ``cv_results_`` dict at the keys ending with that\n scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown\n above. 
('split0_test_precision', 'mean_train_precision' etc.)\n\n best_estimator_ : estimator\n Estimator that was chosen by the search, i.e. estimator\n which gave highest score (or smallest loss if specified)\n on the left out data. Not available if ``refit=False``.\n\n See ``refit`` parameter for more information on allowed values.\n\n best_score_ : float\n Mean cross-validated score of the best_estimator\n\n For multi-metric evaluation, this is present only if ``refit`` is\n specified.\n\n This attribute is not available if ``refit`` is a function.\n\n best_params_ : dict\n Parameter setting that gave the best results on the hold out data.\n\n For multi-metric evaluation, this is present only if ``refit`` is\n specified.\n\n best_index_ : int\n The index (of the ``cv_results_`` arrays) which corresponds to the best\n candidate parameter setting.\n\n The dict at ``search.cv_results_['params'][search.best_index_]`` gives\n the parameter setting for the best model, that gives the highest\n mean score (``search.best_score_``).\n\n For multi-metric evaluation, this is present only if ``refit`` is\n specified.\n\n scorer_ : function or a dict\n Scorer function used on the held out data to choose the best\n parameters for the model.\n\n For multi-metric evaluation, this attribute holds the validated\n ``scoring`` dict which maps the scorer key to the scorer callable.\n\n n_splits_ : int\n The number of cross-validation splits (folds/iterations).\n\n refit_time_ : float\n Seconds used for refitting the best model on the whole dataset.\n\n This is present only if ``refit`` is not False.\n\n Notes\n -----\n The parameters selected are those that maximize the score of the left out\n data, unless an explicit score is passed in which case it is used instead.\n\n If `n_jobs` was set to a value higher than one, the data is copied for each\n point in the grid (and not `n_jobs` times). This is done for efficiency\n reasons if individual jobs take very little time, but may raise errors if\n the dataset is large and not enough memory is available. A workaround in\n this case is to set `pre_dispatch`. Then, the memory is copied only\n `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *\n n_jobs`.\n\n See Also\n ---------\n :class:`ParameterGrid`:\n generates all the combinations of a hyperparameter grid.\n\n :func:`sklearn.model_selection.train_test_split`:\n utility function to split the data into a development set usable\n for fitting a GridSearchCV instance and an evaluation set for\n its final evaluation.\n\n :func:`sklearn.metrics.make_scorer`:\n Make a scorer from a performance metric or loss function.\n\n \"\"\"\n _required_parameters = [\"estimator\", \"param_grid\"]\n\n def __init__(self, estimator, param_grid, scoring=None, n_jobs=None,\n iid='deprecated', refit=True, cv=None, verbose=0,\n pre_dispatch='2*n_jobs', error_score=np.nan,\n return_train_score=False, param_routing=None):\n super().__init__(\n estimator=estimator, scoring=scoring,\n n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,\n pre_dispatch=pre_dispatch, error_score=error_score,\n return_train_score=return_train_score, param_routing=param_routing)\n self.param_grid = param_grid\n _check_param_grid(param_grid)\n\n def _run_search(self, evaluate_candidates):\n \"\"\"Search all candidates in param_grid\"\"\"\n evaluate_candidates(ParameterGrid(self.param_grid))\n" ]
[ [ "sklearn.utils.validation.check_is_fitted", "scipy.stats.rankdata", "sklearn.base.clone", "sklearn.model_selection.ParameterGrid", "sklearn.utils.validation._check_fit_params", "sklearn.model_selection._validation._aggregate_score_dicts", "sklearn.base.is_classifier", "numpy.average", "numpy.array", "numpy.empty", "sklearn.utils.metaestimators.if_delegate_has_method" ] ]
anbasile/mwe
[ "2a56b889c7c7f28aa479e477f8e52da7501c2691" ]
[ "app/words.py" ]
[ "import requests\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict\nimport pandas as pd\nimport json\nimport networkx as nx\nfrom networkx.readwrite import json_graph\nimport numpy as np\nfrom lightning import Lightning\nfrom colorsys import hsv_to_rgb\nfrom sklearn import datasets\nlgn = Lightning(host='http://public.lightning-viz.org')\n\ndef calculate(words):\n # instantiate a dictionary to later be filled with word:miscores\n wc = defaultdict(float)\n frames = []\n print(\"...it will take a while. Wait a sec...\")\n for word in words:\n payload = {'searchstring': word.encode('ascii'),\n 'searchpositional':'word',\n 'searchpostag':'all',\n 'contextsize':'60c',\n 'sort2':'right',\n 'terminate':'100',\n 'searchtype':'coll',\n 'mistat':'on',\n 'collocspanleft':'2',\n 'collocspanright':'2',\n 'collocfilter':'noun'}\n\n r = requests.get(\"http://clic.cimec.unitn.it/cgi-bin/cqp/cqp.pl?corpuslist=WEBBIT\", params=payload)\n soup = BeautifulSoup(r.content, 'lxml')\n\n # parse the html table and extract words and miscores. Add scores\n \n temp = []\n for tr in soup.find_all('tr')[1:]:\n tds = tr.find_all('td')\n word = tds[0].text.split('~~')[1]\n mi = float(tds[4].text)\n wc[word] += mi\n temp.append(map(lambda x:x.text,tds[0:]))\n x = pd.DataFrame(temp)\n df = pd.DataFrame()\n df['coll'] = x.ix[0:,0].apply(lambda x: x.split('~~')[1])\n df['word'] = x.ix[0:,0].apply(lambda x: x.split('~~')[0])\n df['mi'] = x.ix[0:,4]\n frames.append(df)\n\n #sort the results in decreasing order \n results = []\n for w in sorted(wc, key=wc.get, reverse=True):\n results.append((w, wc[w]))\n\n #spit out the top result. If using ipython you can check the rest of the list by tiping `results`\n #viz part\n results_df = pd.concat(frames)\n\n G=nx.from_pandas_dataframe(results_df, 'word','coll',['mi'])\n mat = nx.adjacency_matrix(G).todense()\n viz = lgn.force(mat)\n vid = viz.id\n print(vid)\n url = '<iframe src=\"http://public.lightning-viz.org/visualizations/'+vid+'/iframe/\" width=100% height=400px>'\n return (results[0][0].strip(),url)\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
ritvikshrivastava/mindmeld
[ "48eccac059439ea0f32fa3ac9079415bb006233b" ]
[ "mindmeld/models/text_models.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains all code required to perform multinomial classification\nof text.\n\"\"\"\nimport logging\nimport operator\nimport os\nimport random\n\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_selection import SelectFromModel, SelectPercentile\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import LabelEncoder as SKLabelEncoder\nfrom sklearn.preprocessing import MaxAbsScaler, StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom .evaluation import EvaluatedExample, StandardModelEvaluation\nfrom .helpers import (\n CHAR_NGRAM_FREQ_RSC,\n QUERY_FREQ_RSC,\n WORD_FREQ_RSC,\n WORD_NGRAM_FREQ_RSC,\n)\nfrom .model import ModelConfig, Model, PytorchModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass TextModel(Model):\n # classifier types\n LOG_REG_TYPE = \"logreg\"\n DECISION_TREE_TYPE = \"dtree\"\n RANDOM_FOREST_TYPE = \"rforest\"\n SVM_TYPE = \"svm\"\n ALLOWED_CLASSIFIER_TYPES = [LOG_REG_TYPE, DECISION_TREE_TYPE, RANDOM_FOREST_TYPE, SVM_TYPE]\n\n # default model scoring type\n ACCURACY_SCORING = \"accuracy\"\n\n _NEG_INF = -1e10\n\n def __init__(self, config):\n super().__init__(config)\n self._class_encoder = SKLabelEncoder()\n self._feat_vectorizer = DictVectorizer()\n self._feat_selector = self._get_feature_selector()\n self._feat_scaler = self._get_feature_scaler()\n self._meta_type = None\n self._meta_feat_vectorizer = DictVectorizer(sparse=False)\n self._base_clfs = {}\n self.cv_loss_ = None\n self.train_acc_ = None\n\n def __getstate__(self):\n \"\"\"Returns the information needed pickle an instance of this class.\n\n By default, pickling removes attributes with names starting with\n underscores. 
This overrides that behavior.\n \"\"\"\n attributes = self.__dict__.copy()\n attributes[\"_resources\"] = {\n rname: self._resources.get(rname, {})\n for rname in [\n WORD_FREQ_RSC,\n QUERY_FREQ_RSC,\n WORD_NGRAM_FREQ_RSC,\n CHAR_NGRAM_FREQ_RSC,\n ]\n }\n return attributes\n\n def _get_model_constructor(self):\n \"\"\"Returns the class of the actual underlying model\"\"\"\n classifier_type = self.config.model_settings[\"classifier_type\"]\n try:\n return {\n TextModel.LOG_REG_TYPE: LogisticRegression,\n TextModel.DECISION_TREE_TYPE: DecisionTreeClassifier,\n TextModel.RANDOM_FOREST_TYPE: RandomForestClassifier,\n TextModel.SVM_TYPE: SVC,\n }[classifier_type]\n except KeyError as e:\n msg = \"{}: Classifier type {!r} not recognized\"\n raise ValueError(msg.format(self.__class__.__name__, classifier_type)) from e\n\n def _get_cv_scorer(self, selection_settings):\n \"\"\"\n Returns the scorer to use based on the selection settings and classifier type,\n defaulting to accuracy.\n \"\"\"\n return selection_settings.get(\"scoring\", TextModel.ACCURACY_SCORING)\n\n def select_params(self, examples, labels, selection_settings=None):\n y = self._label_encoder.encode(labels)\n X, y, groups = self.get_feature_matrix(examples, y, fit=True)\n clf, params = self._fit_cv(X, y, groups, selection_settings)\n self._clf = clf\n return params\n\n def _fit(self, examples, labels, params=None):\n \"\"\"Trains a classifier without cross-validation.\n\n Args:\n examples (numpy.matrix): The feature matrix for a dataset.\n labels (numpy.array): The target output values.\n params (dict): Parameters of the classifier\n\n \"\"\"\n params = self._convert_params(params, labels, is_grid=False)\n model_class = self._get_model_constructor()\n params = self._clean_params(model_class, params)\n return model_class(**params).fit(examples, labels)\n\n def predict_log_proba(self, examples, dynamic_resource=None):\n X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)\n predictions = self._predict_proba(X, self._clf.predict_log_proba)\n\n # JSON can't reliably encode infinity, so replace it with large number\n for row in predictions:\n _, probas = row\n for label, proba in probas.items():\n if proba == -np.Infinity:\n probas[label] = TextModel._NEG_INF\n return predictions\n\n def _get_feature_weight(self, feat_name, label_class):\n \"\"\"Retrieves the feature weight from the coefficient matrix. If there are only two\n classes, the feature vector is actually collapsed into one so we need some logic to\n handle that case.\n\n Args:\n feat_name (str) : The feature name\n label_class (int): The index of the label\n\n Returns:\n (ndarray float): The ndarray with a single float element\n \"\"\"\n if len(self._class_encoder.classes_) == 2 and label_class >= 1:\n return np.array([0.0])\n else:\n return self._clf.coef_[\n label_class, self._feat_vectorizer.vocabulary_[feat_name]\n ]\n\n def inspect(self, example, gold_label=None, dynamic_resource=None):\n \"\"\"This class takes an example and returns a 2D list for every feature with feature\n name, feature value, feature weight and their product for the predicted label. 
If gold\n label is passed in, we will also include the feature value and weight for the gold\n label and returns the log probability of the difference.\n\n Args:\n example (Query): The query to be predicted\n gold_label (str): The gold label for this string\n dynamic_resource (dict, optional): A dynamic resource to aid NLP inference\n\n Returns:\n (list of lists): A 2D array that includes every feature, their value, weight and \\\n probability\n \"\"\"\n if not isinstance(self._clf, LogisticRegression):\n logging.warning(\n \"Currently inspection is only available for Logistic Regression Model\"\n )\n return []\n\n try:\n gold_class = self._class_encoder.transform([gold_label])\n except ValueError:\n logger.warning(\"Unable to decode label `%s`\", gold_label)\n gold_class = None\n\n pred_label = self.predict([example], dynamic_resource=dynamic_resource)[0]\n pred_class = self._class_encoder.transform([pred_label])\n features = self._extract_features(\n example, dynamic_resource=dynamic_resource,\n text_preparation_pipeline=self.text_preparation_pipeline\n )\n\n logging.info(\"Predicted: %s.\", pred_label)\n\n if gold_class is None:\n columns = [\"Feature\", \"Value\", \"Pred_W({0})\".format(pred_label), \"Pred_P\"]\n else:\n columns = [\n \"Feature\",\n \"Value\",\n \"Pred_W({0})\".format(pred_label),\n \"Pred_P\",\n \"Gold_W({0})\".format(gold_label),\n \"Gold_P\",\n \"Diff\",\n ]\n logging.info(\"Gold: %s.\", gold_label)\n\n inspect_table = [columns]\n\n # Get all active features sorted alphabetically by name\n features = sorted(features.items(), key=operator.itemgetter(0))\n for feature in features:\n feat_name = feature[0]\n feat_value = feature[1]\n\n # Features we haven't seen before won't be in our vectorizer\n # e.g., an exact match feature for a query we've never seen before\n if feat_name not in self._feat_vectorizer.vocabulary_:\n continue\n\n weight = self._get_feature_weight(feat_name, pred_class)\n product = feat_value * weight\n\n if gold_class is None:\n row = [\n feat_name,\n round(feat_value, 4),\n weight.round(4),\n product.round(4),\n \"-\",\n \"-\",\n \"-\",\n ]\n else:\n gold_w = self._get_feature_weight(feat_name, gold_class)\n gold_p = feat_value * gold_w\n diff = gold_p - product\n row = [\n feat_name,\n round(feat_value, 4),\n weight.round(4),\n product.round(4),\n gold_w.round(4),\n gold_p.round(4),\n diff.round(4),\n ]\n\n inspect_table.append(row)\n\n return inspect_table\n\n def _predict_proba(self, X, predictor):\n predictions = []\n for row in predictor(X):\n probabilities = {}\n top_class = None\n for class_index, proba in enumerate(row):\n raw_class = self._class_encoder.inverse_transform([class_index])[0]\n decoded_class = self._label_encoder.decode([raw_class])[0]\n probabilities[decoded_class] = proba\n if proba > probabilities.get(top_class, -1.0):\n top_class = decoded_class\n predictions.append((top_class, probabilities))\n\n return predictions\n\n def get_feature_matrix(self, examples, y=None, fit=False, dynamic_resource=None):\n \"\"\"Transforms a list of examples into a feature matrix.\n\n Args:\n examples (list): The examples.\n\n Returns:\n (tuple): tuple containing:\n\n * (numpy.matrix): The feature matrix.\n * (numpy.array): The group labels for examples.\n \"\"\"\n groups = []\n feats = []\n for idx, example in enumerate(examples):\n feats.append(\n self._extract_features(example, dynamic_resource, self.text_preparation_pipeline)\n )\n groups.append(idx)\n\n X, y = self._preprocess_data(feats, y, fit=fit)\n return X, y, groups\n\n def 
_preprocess_data(self, X, y=None, fit=False):\n\n if fit:\n y = self._class_encoder.fit_transform(y)\n X = self._feat_vectorizer.fit_transform(X)\n if self._feat_scaler is not None:\n X = self._feat_scaler.fit_transform(X)\n if self._feat_selector is not None:\n X = self._feat_selector.fit_transform(X, y)\n else:\n X = self._feat_vectorizer.transform(X)\n if self._feat_scaler is not None:\n X = self._feat_scaler.transform(X)\n if self._feat_selector is not None:\n X = self._feat_selector.transform(X)\n\n return X, y\n\n def _convert_params(self, param_grid, y, is_grid=True):\n \"\"\"\n Convert the params from the style given by the config to the style\n passed in to the actual classifier.\n\n Args:\n param_grid (dict): lists of classifier parameter values, keyed by parameter name\n\n Returns:\n (dict): revised param_grid\n \"\"\"\n if \"class_weight\" in param_grid:\n raw_weights = (\n param_grid[\"class_weight\"] if is_grid else [param_grid[\"class_weight\"]]\n )\n weights = [\n {\n k\n if isinstance(k, int)\n else self._class_encoder.transform((k,))[0]: v\n for k, v in cw_dict.items()\n }\n for cw_dict in raw_weights\n ]\n param_grid[\"class_weight\"] = weights if is_grid else weights[0]\n elif \"class_bias\" in param_grid:\n # interpolate between class_bias=0 => class_weight=None\n # and class_bias=1 => class_weight='balanced'\n class_count = np.bincount(y)\n classes = self._class_encoder.classes_\n weights = []\n raw_bias = (\n param_grid[\"class_bias\"] if is_grid else [param_grid[\"class_bias\"]]\n )\n for class_bias in raw_bias:\n # these weights are same as sklearn's class_weight='balanced'\n balanced_w = [(len(y) / len(classes) / c) for c in class_count]\n balanced_tuples = list(zip(list(range(len(classes))), balanced_w))\n\n weights.append(\n {c: (1 - class_bias) + class_bias * w for c, w in balanced_tuples}\n )\n param_grid[\"class_weight\"] = weights if is_grid else weights[0]\n del param_grid[\"class_bias\"]\n\n return param_grid\n\n def _get_feature_selector(self):\n \"\"\"Get a feature selector instance based on the feature_selector model\n parameter\n\n Returns:\n (Object): a feature selector which returns a reduced feature matrix, \\\n given the full feature matrix, X and the class labels, y\n \"\"\"\n if self.config.model_settings is None:\n selector_type = None\n else:\n selector_type = self.config.model_settings.get(\"feature_selector\")\n selector = {\n \"l1\": SelectFromModel(LogisticRegression(penalty=\"l1\", C=1)),\n \"f\": SelectPercentile(),\n }.get(selector_type)\n return selector\n\n def _get_feature_scaler(self):\n \"\"\"Get a feature value scaler based on the model settings\"\"\"\n if self.config.model_settings is None:\n scale_type = None\n else:\n scale_type = self.config.model_settings.get(\"feature_scaler\")\n scaler = {\n \"std-dev\": StandardScaler(with_mean=False),\n \"max-abs\": MaxAbsScaler(),\n }.get(scale_type)\n return scaler\n\n def evaluate(self, examples, labels):\n \"\"\"Evaluates a model against the given examples and labels\n\n Args:\n examples: A list of examples to predict\n labels: A list of expected labels\n\n Returns:\n ModelEvaluation: an object containing information about the \\\n evaluation\n \"\"\"\n # TODO: also expose feature weights?\n predictions = self.predict_proba(examples)\n\n # Create a model config object for the current effective config (after param selection)\n config = self._get_effective_config()\n\n evaluations = [\n EvaluatedExample(\n e, labels[i], predictions[i][0], predictions[i][1], config.label_type\n )\n for i, 
e in enumerate(examples)\n ]\n\n model_eval = StandardModelEvaluation(config, evaluations)\n return model_eval\n\n def fit(self, examples, labels, params=None):\n \"\"\"Trains this model.\n\n This method inspects instance attributes to determine the classifier\n object and cross-validation strategy, and then fits the model to the\n training examples passed in.\n\n Args:\n examples (ProcessedQueryList.*Iterator): A list of examples.\n labels (ProcessedQueryList.*Iterator): A parallel list to examples. The gold labels\n for each example.\n params (dict, optional): Parameters to use when training. Parameter\n selection will be bypassed if this is provided\n\n Returns:\n (TextModel): Returns self to match classifier scikit-learn \\\n interfaces.\n \"\"\"\n params = params or self.config.params\n skip_param_selection = params is not None or self.config.param_selection is None\n\n # Shuffle to prevent order effects\n indices = list(range(len(labels)))\n random.shuffle(indices)\n examples.reorder(indices)\n labels.reorder(indices)\n distinct_labels = set(labels)\n if len(set(distinct_labels)) <= 1:\n return self\n\n # Extract features and classes\n y = self._label_encoder.encode(labels)\n X, y, groups = self.get_feature_matrix(examples, y, fit=True)\n\n if skip_param_selection:\n self._clf = self._fit(X, y, params)\n self._current_params = params\n else:\n # run cross validation to select params\n best_clf, best_params = self._fit_cv(X, y, groups)\n self._clf = best_clf\n self._current_params = best_params\n\n return self\n\n def predict(self, examples, dynamic_resource=None):\n X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)\n y = self._clf.predict(X)\n predictions = self._class_encoder.inverse_transform(y)\n return self._label_encoder.decode(predictions)\n\n def predict_proba(self, examples, dynamic_resource=None):\n X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)\n return self._predict_proba(X, self._clf.predict_proba)\n\n def view_extracted_features(self, example, dynamic_resource=None):\n return self._extract_features(\n example, dynamic_resource=dynamic_resource,\n text_preparation_pipeline=self.text_preparation_pipeline\n )\n\n @classmethod\n def load(cls, path):\n metadata = joblib.load(path)\n\n # backwards compatability check for RoleClassifiers\n if isinstance(metadata, dict):\n return metadata[\"model\"]\n\n # in this case, metadata = model which was serialized and dumped\n return metadata\n\n def _dump(self, path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n joblib.dump(self, path)\n\n\nclass PytorchTextModel(PytorchModel):\n ALLOWED_CLASSIFIER_TYPES = [\"embedder\", \"cnn\", \"lstm\"]\n pass\n\n\nclass AutoTextModel:\n\n @staticmethod\n def get_model_class(config: ModelConfig):\n\n CLASSES = [TextModel, PytorchTextModel]\n classifier_type = config.model_settings[\"classifier_type\"]\n\n for _class in CLASSES:\n if classifier_type in _class.ALLOWED_CLASSIFIER_TYPES:\n return _class\n\n msg = f\"Invalid 'classifier_type': {classifier_type}. \" \\\n f\"Allowed types are: {[_class.ALLOWED_CLASSIFIER_TYPES for _class in CLASSES]}\"\n raise ValueError(msg)\n" ]
[ [ "sklearn.externals.joblib.dump", "sklearn.externals.joblib.load", "sklearn.linear_model.LogisticRegression", "sklearn.preprocessing.MaxAbsScaler", "sklearn.preprocessing.LabelEncoder", "numpy.bincount", "sklearn.feature_selection.SelectPercentile", "sklearn.feature_extraction.DictVectorizer", "sklearn.preprocessing.StandardScaler", "numpy.array" ] ]
sagieppel/Classification-of-the-material-given-region-of-an-image-using-a-convolutional-neural-net-with-attent
[ "2c78f069d4f4d9be7197b5bff6df39fc239270e4" ]
[ "EvaluateAccuracy.py" ]
[ "# Evaluate precision of image classification in a given image region\n# Instructions:\n# a) Set folder of images in Image_Dir\n# c) Set folder for ground truth Annotation in AnnotationDir\n# The Label Maps should be saved as png image with same name as the corresponding image and png ending. The value of each pixel correspond to it class\n# d) Set number of classes number in NUM_CLASSES\n# e) Set path to trained model weights in Trained_model_path\n# e) Run script\n##########################################################################################################################################################################\n\n\n\nimport Reader as Reader\nimport torch\nimport numpy as np\nimport AttentionNet as Net\n#...........................................Input Parameters.................................................\nUseCuda=True\nImageDir=\"ExampleData/TrainVal_Set/Images/\"\nAnnotationDir=\"ExampleData/TrainVal_Set/Annotations/\"\nTrained_model_path=\"logs/WeightRegionMaterialClassificationOpenSurface.torch\" # If you want tos start from pretrained model\nEvaluationFile=Trained_model_path.replace(\".torch\",\"Eval.xls\")\nNumClasses=44 # Number of classes if -1 read num classes from the reader\nBackgroundClass=0 # Marking for background/unknown class that will be ignored\n#---------------------Create reader for data set--------------------------------------------------------------------------------------------------------------\n#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------\nReader = Reader.Reader(ImageDir=ImageDir, AnnotationDir=AnnotationDir,NumClasses=NumClasses,BackgroundClass=BackgroundClass)\nif NumClasses==-1: NumClasses = Reader.NumClass+1\n\n#---------------------Load an initiate Initiate neural net------------------------------------------------------------------------------------\nNet=Net.Net(NumClasses=NumClasses,UseGPU=UseCuda)\nNet.AddAttententionLayer()\nNet.load_state_dict(torch.load(Trained_model_path))\nif UseCuda: Net.cuda()\nNet.eval()\n#==============================Region size ranges in pixesl=============================================================================================\nSizes=[1000,2000,4000,8000,16000,32000,64000,128000,256000,500000,1000000] #sizes pixels\nNumSizes=len(Sizes)\n#--------------------Evaluate net accuracy---------------------------------------------------------------------------------\nTP=np.zeros([Reader.NumClass+1],dtype=np.float64) # True positive per class\nFP=np.zeros([Reader.NumClass+1],dtype=np.float64) # False positive per class\nFN=np.zeros([Reader.NumClass+1],dtype=np.float64) # False Negative per class\nSumPred=np.zeros([Reader.NumClass+1],dtype=np.float64)\n\nSzTP=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # True positive per class per size\nSzFP=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # False positive per class per size\nSzFN=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # False Negative per class per size\nSzSumPred=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64)\n # Counter of segment of specific class appearence\nuu=0\nwhile (Reader.ImageN<len(Reader.FileList)):\n # for i,sz in enumerate(Sizes):\n\n Images, SegmentMask, Labels, LabelsOneHot = Reader.ReadNextImageClean()\n uu+=1\n print(uu)\n BatchSize = Images.shape[0]\n for i in range(BatchSize):\n#.........................Use net to make 
predicition.........................................\n Prob, Lb = Net.forward(Images[i:i+1], ROI=SegmentMask[i:i+1],EvalMode=True) # Run net inference and get prediction\n PredLb = Lb.data.cpu().numpy()\n#.................................Evaluate accuracy per size range......................................................\n LbSize=SegmentMask[i].sum()\n SzInd=-1\n for f,sz in enumerate(Sizes): # Find size range of the ROI region\n if LbSize<sz:\n SzInd=f\n break\n\n if PredLb[0] == Labels[i]:\n # print(\"Correct\")\n TP[Labels[i]] += 1\n SzTP[Labels[i],SzInd] += 1\n else:\n # print(\"Wrong\")\n FN[Labels[i]] += 1\n FP[PredLb[0]] += 1\n SzFN[Labels[i],SzInd] += 1\n SzFP[PredLb[0],SzInd] += 1\n SumPred[Labels[i]] += 1\n SzSumPred[Labels[i],SzInd] += 1\n\n#==============================Write to file=======================================================================\nf = open(EvaluationFile, \"w\")\n\nNrmF=len(SumPred)/(np.sum(SumPred>0)) # Normalization factor for classes with zero occurrences\n\ntxt=\"Mean Accuracy All Class Average =\\t\"+ str((TP/(SumPred+0.00000001)).mean()*NrmF*100)+\"%\"+\"\\r\\n\"\nprint(txt)\nf.write(txt)\n\ntxt=\"Mean Accuracy Images =\\t\"+ str((TP.mean()/SumPred.mean())*100)+\"%\"+\"\\r\\n\"\nprint(txt)\nf.write(txt)\n\n\nprint(\"\\r\\n=============================================================================\\r\\n\")\nprint(txt)\nf.write(txt)\n\ntxt=\"SizeMax\\tMeanClasses\\tMeanGlobal\\tNum Instances\\tNumValidClasses\\r\\n\"\nprint(txt)\nf.write(txt)\nfor i,sz in enumerate(Sizes):\n if SzSumPred[:,i].sum()==0: continue\n NumValidClass=np.sum(SzSumPred[:, i] > 0)\n NrmF = len(SzSumPred[:,i]) / NumValidClass # Normalization factor for classes with zero occurrences\n txt=str(sz)+\"\\t\"+str((SzTP[:,i]/(SzSumPred[:,i]+0.00001)).mean()*NrmF*100)+\"%\\t\"+str(100*(SzTP[:,i]).mean()/(SzSumPred[:,i].mean()))+\"%\\t\"+str(SzSumPred[:,i].sum())+\"\\t\"+str(NumValidClass)+\"\\r\\n\"\n print(txt)\n f.write(txt)\nf.close()\n\n" ]
[ [ "numpy.zeros", "numpy.sum", "torch.load" ] ]
HanumanJat8698/numpy
[ "cbec2c8054ea6150490b9e72eb051848b79344d1" ]
[ "numpy/core/tests/test_casting_unittests.py" ]
[ "\"\"\"\nThe tests exercise the casting machinery in a more low-level manner.\nThe reason is mostly to test a new implementation of the casting machinery.\n\nUnlike most tests in NumPy, these are closer to unit-tests rather\nthan integration tests.\n\"\"\"\n\nimport pytest\nimport textwrap\nimport enum\nimport itertools\nimport random\n\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\n\nfrom numpy.testing import assert_array_equal\nfrom numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl\n\n\n# Simple skips object, parametric and long double (unsupported by struct)\nsimple_dtypes = \"?bhilqBHILQefdFD\"\nif np.dtype(\"l\").itemsize != np.dtype(\"q\").itemsize:\n # Remove l and L, the table was generated with 64bit linux in mind.\n simple_dtypes = simple_dtypes.replace(\"l\", \"\").replace(\"L\", \"\")\nsimple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]\n\n\ndef simple_dtype_instances():\n for dtype_class in simple_dtypes:\n dt = dtype_class()\n yield pytest.param(dt, id=str(dt))\n if dt.byteorder != \"|\":\n dt = dt.newbyteorder()\n yield pytest.param(dt, id=str(dt))\n\n\ndef get_expected_stringlength(dtype):\n \"\"\"Returns the string length when casting the basic dtypes to strings.\n \"\"\"\n if dtype == np.bool_:\n return 5\n if dtype.kind in \"iu\":\n if dtype.itemsize == 1:\n length = 3\n elif dtype.itemsize == 2:\n length = 5\n elif dtype.itemsize == 4:\n length = 10\n elif dtype.itemsize == 8:\n length = 20\n else:\n raise AssertionError(f\"did not find expected length for {dtype}\")\n\n if dtype.kind == \"i\":\n length += 1 # adds one character for the sign\n\n return length\n\n # Note: Can't do dtype comparison for longdouble on windows\n if dtype.char == \"g\":\n return 48\n elif dtype.char == \"G\":\n return 48 * 2\n elif dtype.kind == \"f\":\n return 32 # also for half apparently.\n elif dtype.kind == \"c\":\n return 32 * 2\n\n raise AssertionError(f\"did not find expected length for {dtype}\")\n\n\nclass Casting(enum.IntEnum):\n no = 0\n equiv = 1\n safe = 2\n same_kind = 3\n unsafe = 4\n cast_is_view = 1 << 16\n\n\ndef _get_cancast_table():\n table = textwrap.dedent(\"\"\"\n X ? b h i l q B H I L Q e f d g F D G S U V O M m\n ? # = = = = = = = = = = = = = = = = = = = = = . =\n b . # = = = = . . . . . = = = = = = = = = = = . =\n h . ~ # = = = . . . . . ~ = = = = = = = = = = . =\n i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =\n l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =\n q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =\n B . ~ = = = = # = = = = = = = = = = = = = = = . =\n H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =\n I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =\n L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~\n Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~\n e . . . . . . . . . . . # = = = = = = = = = = . .\n f . . . . . . . . . . . ~ # = = = = = = = = = . .\n d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .\n g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .\n F . . . . . . . . . . . . . . . # = = = = = = . .\n D . . . . . . . . . . . . . . . ~ # = = = = = . .\n G . . . . . . . . . . . . . . . ~ ~ # = = = = . .\n S . . . . . . . . . . . . . . . . . . # = = = . .\n U . . . . . . . . . . . . . . . . . . . # = = . .\n V . . . . . . . . . . . . . . . . . . . . # = . .\n O . . . . . . . . . . . . . . . . . . . . = # . .\n M . . . . . . . . . . . . . . . . . . . . = = # .\n m . . . . . . . . . . . . . . . . . . . . = = . 
#\n \"\"\").strip().split(\"\\n\")\n dtypes = [type(np.dtype(c)) for c in table[0][2::2]]\n\n convert_cast = {\".\": Casting.unsafe, \"~\": Casting.same_kind,\n \"=\": Casting.safe, \"#\": Casting.equiv,\n \" \": -1}\n\n cancast = {}\n for from_dt, row in zip(dtypes, table[1:]):\n cancast[from_dt] = {}\n for to_dt, c in zip(dtypes, row[2::2]):\n cancast[from_dt][to_dt] = convert_cast[c]\n\n return cancast\n\nCAST_TABLE = _get_cancast_table()\n\n\nclass TestChanges:\n \"\"\"\n These test cases excercise some behaviour changes\n \"\"\"\n @pytest.mark.parametrize(\"string\", [\"S\", \"U\"])\n @pytest.mark.parametrize(\"floating\", [\"e\", \"f\", \"d\", \"g\"])\n def test_float_to_string(self, floating, string):\n assert np.can_cast(floating, string)\n # 100 is long enough to hold any formatted floating\n assert np.can_cast(floating, f\"{string}100\")\n\n def test_to_void(self):\n # But in general, we do consider these safe:\n assert np.can_cast(\"d\", \"V\")\n assert np.can_cast(\"S20\", \"V\")\n\n # Do not consider it a safe cast if the void is too smaller:\n assert not np.can_cast(\"d\", \"V1\")\n assert not np.can_cast(\"S20\", \"V1\")\n assert not np.can_cast(\"U1\", \"V1\")\n # Structured to unstructured is just like any other:\n assert np.can_cast(\"d,i\", \"V\", casting=\"same_kind\")\n # Unstructured void to unstructured is actually no cast at all:\n assert np.can_cast(\"V3\", \"V\", casting=\"no\")\n assert np.can_cast(\"V0\", \"V\", casting=\"no\")\n\n\nclass TestCasting:\n size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize\n\n def get_data(self, dtype1, dtype2):\n if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:\n length = self.size // dtype1.itemsize\n else:\n length = self.size // dtype2.itemsize\n\n # Assume that the base array is well enough aligned for all inputs.\n arr1 = np.empty(length, dtype=dtype1)\n assert arr1.flags.c_contiguous\n assert arr1.flags.aligned\n\n values = [random.randrange(-128, 128) for _ in range(length)]\n\n for i, value in enumerate(values):\n # Use item assignment to ensure this is not using casting:\n arr1[i] = value\n\n if dtype2 is None:\n if dtype1.char == \"?\":\n values = [bool(v) for v in values]\n return arr1, values\n\n if dtype2.char == \"?\":\n values = [bool(v) for v in values]\n\n arr2 = np.empty(length, dtype=dtype2)\n assert arr2.flags.c_contiguous\n assert arr2.flags.aligned\n\n for i, value in enumerate(values):\n # Use item assignment to ensure this is not using casting:\n arr2[i] = value\n\n return arr1, arr2, values\n\n def get_data_variation(self, arr1, arr2, aligned=True, contig=True):\n \"\"\"\n Returns a copy of arr1 that may be non-contiguous or unaligned, and a\n matching array for arr2 (although not a copy).\n \"\"\"\n if contig:\n stride1 = arr1.dtype.itemsize\n stride2 = arr2.dtype.itemsize\n elif aligned:\n stride1 = 2 * arr1.dtype.itemsize\n stride2 = 2 * arr2.dtype.itemsize\n else:\n stride1 = arr1.dtype.itemsize + 1\n stride2 = arr2.dtype.itemsize + 1\n\n max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1\n max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1\n from_bytes = np.zeros(max_size1, dtype=np.uint8)\n to_bytes = np.zeros(max_size2, dtype=np.uint8)\n\n # Sanity check that the above is large enough:\n assert stride1 * len(arr1) <= from_bytes.nbytes\n assert stride2 * len(arr2) <= to_bytes.nbytes\n\n if aligned:\n new1 = as_strided(from_bytes[:-1].view(arr1.dtype),\n arr1.shape, (stride1,))\n new2 = as_strided(to_bytes[:-1].view(arr2.dtype),\n arr2.shape, (stride2,))\n else:\n new1 = 
as_strided(from_bytes[1:].view(arr1.dtype),\n arr1.shape, (stride1,))\n new2 = as_strided(to_bytes[1:].view(arr2.dtype),\n arr2.shape, (stride2,))\n\n new1[...] = arr1\n\n if not contig:\n # Ensure we did not overwrite bytes that should not be written:\n offset = arr1.dtype.itemsize if aligned else 0\n buf = from_bytes[offset::stride1].tobytes()\n assert buf.count(b\"\\0\") == len(buf)\n\n if contig:\n assert new1.flags.c_contiguous\n assert new2.flags.c_contiguous\n else:\n assert not new1.flags.c_contiguous\n assert not new2.flags.c_contiguous\n\n if aligned:\n assert new1.flags.aligned\n assert new2.flags.aligned\n else:\n assert not new1.flags.aligned or new1.dtype.alignment == 1\n assert not new2.flags.aligned or new2.dtype.alignment == 1\n\n return new1, new2\n\n @pytest.mark.parametrize(\"from_Dt\", simple_dtypes)\n def test_simple_cancast(self, from_Dt):\n for to_Dt in simple_dtypes:\n cast = get_castingimpl(from_Dt, to_Dt)\n\n for from_dt in [from_Dt(), from_Dt().newbyteorder()]:\n default = cast._resolve_descriptors((from_dt, None))[1][1]\n assert default == to_Dt()\n del default\n\n for to_dt in [to_Dt(), to_Dt().newbyteorder()]:\n casting, (from_res, to_res) = cast._resolve_descriptors(\n (from_dt, to_dt))\n assert(type(from_res) == from_Dt)\n assert(type(to_res) == to_Dt)\n if casting & Casting.cast_is_view:\n # If a view is acceptable, this is \"no\" casting\n # and byte order must be matching.\n assert casting == Casting.no | Casting.cast_is_view\n # The above table lists this as \"equivalent\"\n assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]\n # Note that to_res may not be the same as from_dt\n assert from_res.isnative == to_res.isnative\n else:\n if from_Dt == to_Dt:\n # Note that to_res may not be the same as from_dt\n assert from_res.isnative != to_res.isnative\n assert casting == CAST_TABLE[from_Dt][to_Dt]\n\n if from_Dt is to_Dt:\n assert(from_dt is from_res)\n assert(to_dt is to_res)\n\n\n @pytest.mark.filterwarnings(\"ignore::numpy.ComplexWarning\")\n @pytest.mark.parametrize(\"from_dt\", simple_dtype_instances())\n def test_simple_direct_casts(self, from_dt):\n \"\"\"\n This test checks numeric direct casts for dtypes supported also by the\n struct module (plus complex). It tries to be test a wide range of\n inputs, but skips over possibly undefined behaviour (e.g. 
int rollover).\n Longdouble and CLongdouble are tested, but only using double precision.\n\n If this test creates issues, it should possibly just be simplified\n or even removed (checking whether unaligned/non-contiguous casts give\n the same results is useful, though).\n \"\"\"\n for to_dt in simple_dtype_instances():\n to_dt = to_dt.values[0]\n cast = get_castingimpl(type(from_dt), type(to_dt))\n\n casting, (from_res, to_res) = cast._resolve_descriptors(\n (from_dt, to_dt))\n\n if from_res is not from_dt or to_res is not to_dt:\n # Do not test this case, it is handled in multiple steps,\n # each of which should is tested individually.\n return\n\n safe = (casting & ~Casting.cast_is_view) <= Casting.safe\n del from_res, to_res, casting\n\n arr1, arr2, values = self.get_data(from_dt, to_dt)\n\n cast._simple_strided_call((arr1, arr2))\n\n # Check via python list\n assert arr2.tolist() == values\n\n # Check that the same results are achieved for strided loops\n arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)\n cast._simple_strided_call((arr1_o, arr2_o))\n\n assert_array_equal(arr2_o, arr2)\n assert arr2_o.tobytes() == arr2.tobytes()\n\n # Check if alignment makes a difference, but only if supported\n # and only if the alignment can be wrong\n if ((from_dt.alignment == 1 and to_dt.alignment == 1) or\n not cast._supports_unaligned):\n return\n\n arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)\n cast._simple_strided_call((arr1_o, arr2_o))\n\n assert_array_equal(arr2_o, arr2)\n assert arr2_o.tobytes() == arr2.tobytes()\n\n arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)\n cast._simple_strided_call((arr1_o, arr2_o))\n\n assert_array_equal(arr2_o, arr2)\n assert arr2_o.tobytes() == arr2.tobytes()\n\n del arr1_o, arr2_o, cast\n\n @pytest.mark.parametrize(\"from_Dt\", simple_dtypes)\n def test_numeric_to_times(self, from_Dt):\n # We currently only implement contiguous loops, so only need to\n # test those.\n from_dt = from_Dt()\n\n time_dtypes = [np.dtype(\"M8\"), np.dtype(\"M8[ms]\"), np.dtype(\"M8[4D]\"),\n np.dtype(\"m8\"), np.dtype(\"m8[ms]\"), np.dtype(\"m8[4D]\")]\n for time_dt in time_dtypes:\n cast = get_castingimpl(type(from_dt), type(time_dt))\n\n casting, (from_res, to_res) = cast._resolve_descriptors(\n (from_dt, time_dt))\n\n assert from_res is from_dt\n assert to_res is time_dt\n del from_res, to_res\n\n assert(casting & CAST_TABLE[from_Dt][type(time_dt)])\n\n int64_dt = np.dtype(np.int64)\n arr1, arr2, values = self.get_data(from_dt, int64_dt)\n arr2 = arr2.view(time_dt)\n arr2[...] = np.datetime64(\"NaT\")\n\n if time_dt == np.dtype(\"M8\"):\n # This is a bit of a strange path, and could probably be removed\n arr1[-1] = 0 # ensure at least one value is not NaT\n\n # The cast currently succeeds, but the values are invalid:\n cast._simple_strided_call((arr1, arr2))\n with pytest.raises(ValueError):\n str(arr2[-1]) # e.g. 
conversion to string fails\n return\n\n cast._simple_strided_call((arr1, arr2))\n\n assert [int(v) for v in arr2.tolist()] == values\n\n # Check that the same results are achieved for strided loops\n arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)\n cast._simple_strided_call((arr1_o, arr2_o))\n\n assert_array_equal(arr2_o, arr2)\n assert arr2_o.tobytes() == arr2.tobytes()\n\n @pytest.mark.parametrize(\n [\"from_dt\", \"to_dt\", \"expected_casting\", \"nom\", \"denom\"],\n [(\"M8[ns]\", None,\n Casting.no | Casting.cast_is_view, 1, 1),\n (str(np.dtype(\"M8[ns]\").newbyteorder()), None, Casting.equiv, 1, 1),\n (\"M8\", \"M8[ms]\", Casting.safe | Casting.cast_is_view, 1, 1),\n (\"M8[ms]\", \"M8\", Casting.unsafe, 1, 1), # should be invalid cast\n (\"M8[5ms]\", \"M8[5ms]\", Casting.no | Casting.cast_is_view, 1, 1),\n (\"M8[ns]\", \"M8[ms]\", Casting.same_kind, 1, 10**6),\n (\"M8[ms]\", \"M8[ns]\", Casting.safe, 10**6, 1),\n (\"M8[ms]\", \"M8[7ms]\", Casting.same_kind, 1, 7),\n (\"M8[4D]\", \"M8[1M]\", Casting.same_kind, None,\n # give full values based on NumPy 1.19.x\n [-2**63, 0, -1, 1314, -1315, 564442610]),\n (\"m8[ns]\", None, Casting.no | Casting.cast_is_view, 1, 1),\n (str(np.dtype(\"m8[ns]\").newbyteorder()), None, Casting.equiv, 1, 1),\n (\"m8\", \"m8[ms]\", Casting.safe | Casting.cast_is_view, 1, 1),\n (\"m8[ms]\", \"m8\", Casting.unsafe, 1, 1), # should be invalid cast\n (\"m8[5ms]\", \"m8[5ms]\", Casting.no | Casting.cast_is_view, 1, 1),\n (\"m8[ns]\", \"m8[ms]\", Casting.same_kind, 1, 10**6),\n (\"m8[ms]\", \"m8[ns]\", Casting.safe, 10**6, 1),\n (\"m8[ms]\", \"m8[7ms]\", Casting.same_kind, 1, 7),\n (\"m8[4D]\", \"m8[1M]\", Casting.unsafe, None,\n # give full values based on NumPy 1.19.x\n [-2**63, 0, 0, 1314, -1315, 564442610])])\n def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):\n from_dt = np.dtype(from_dt)\n if to_dt is not None:\n to_dt = np.dtype(to_dt)\n\n # Test a few values for casting (results generated with NumPy 1.19)\n values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])\n values = values.astype(np.dtype(\"int64\").newbyteorder(from_dt.byteorder))\n assert values.dtype.byteorder == from_dt.byteorder\n assert np.isnat(values.view(from_dt)[0])\n\n DType = type(from_dt)\n cast = get_castingimpl(DType, DType)\n casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))\n assert from_res is from_dt\n assert to_res is to_dt or to_dt is None\n assert casting == expected_casting\n\n if nom is not None:\n expected_out = (values * nom // denom).view(to_res)\n expected_out[0] = \"NaT\"\n else:\n expected_out = np.empty_like(values)\n expected_out[...] = denom\n expected_out = expected_out.view(to_dt)\n\n orig_arr = values.view(from_dt)\n orig_out = np.empty_like(expected_out)\n\n if casting == Casting.unsafe and (to_dt == \"m8\" or to_dt == \"M8\"):\n # Casting from non-generic to generic units is an error and should\n # probably be reported as an invalid cast earlier.\n with pytest.raises(ValueError):\n cast._simple_strided_call((orig_arr, orig_out))\n return\n\n for aligned in [True, True]:\n for contig in [True, True]:\n arr, out = self.get_data_variation(\n orig_arr, orig_out, aligned, contig)\n out[...] 
= 0\n cast._simple_strided_call((arr, out))\n assert_array_equal(out.view(\"int64\"), expected_out.view(\"int64\"))\n\n def string_with_modified_length(self, dtype, change_length):\n fact = 1 if dtype.char == \"S\" else 4\n length = dtype.itemsize // fact + change_length\n return np.dtype(f\"{dtype.byteorder}{dtype.char}{length}\")\n\n @pytest.mark.parametrize(\"other_DT\", simple_dtypes)\n @pytest.mark.parametrize(\"string_char\", [\"S\", \"U\"])\n def test_string_cancast(self, other_DT, string_char):\n fact = 1 if string_char == \"S\" else 4\n\n string_DT = type(np.dtype(string_char))\n cast = get_castingimpl(other_DT, string_DT)\n\n other_dt = other_DT()\n expected_length = get_expected_stringlength(other_dt)\n string_dt = np.dtype(f\"{string_char}{expected_length}\")\n\n safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))\n assert res_dt.itemsize == expected_length * fact\n assert safety == Casting.safe # we consider to string casts \"safe\"\n assert isinstance(res_dt, string_DT)\n\n # These casts currently implement changing the string length, so\n # check the cast-safety for too long/fixed string lengths:\n for change_length in [-1, 0, 1]:\n if change_length >= 0:\n expected_safety = Casting.safe\n else:\n expected_safety = Casting.same_kind\n\n to_dt = self.string_with_modified_length(string_dt, change_length)\n safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))\n assert res_dt is to_dt\n assert safety == expected_safety\n\n # The opposite direction is always considered unsafe:\n cast = get_castingimpl(string_DT, other_DT)\n\n safety, _ = cast._resolve_descriptors((string_dt, other_dt))\n assert safety == Casting.unsafe\n\n cast = get_castingimpl(string_DT, other_DT)\n safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))\n assert safety == Casting.unsafe\n assert other_dt is res_dt # returns the singleton for simple dtypes\n\n @pytest.mark.parametrize(\"string_char\", [\"S\", \"U\"])\n @pytest.mark.parametrize(\"other_dt\", simple_dtype_instances())\n def test_simple_string_casts_roundtrip(self, other_dt, string_char):\n \"\"\"\n Tests casts from and to string by checking the roundtripping property.\n\n The test also covers some string to string casts (but not all).\n\n If this test creates issues, it should possibly just be simplified\n or even removed (checking whether unaligned/non-contiguous casts give\n the same results is useful, though).\n \"\"\"\n string_DT = type(np.dtype(string_char))\n\n cast = get_castingimpl(type(other_dt), string_DT)\n cast_back = get_castingimpl(string_DT, type(other_dt))\n _, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))\n\n if res_other_dt is not other_dt:\n # do not support non-native byteorder, skip test in that case\n assert other_dt.byteorder != res_other_dt.byteorder\n return\n\n orig_arr, values = self.get_data(other_dt, None)\n str_arr = np.zeros(len(orig_arr), dtype=string_dt)\n string_dt_short = self.string_with_modified_length(string_dt, -1)\n str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)\n string_dt_long = self.string_with_modified_length(string_dt, 1)\n str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)\n\n assert not cast._supports_unaligned # if support is added, should test\n assert not cast_back._supports_unaligned\n\n for contig in [True, False]:\n other_arr, str_arr = self.get_data_variation(\n orig_arr, str_arr, True, contig)\n _, str_arr_short = self.get_data_variation(\n orig_arr, str_arr_short.copy(), True, contig)\n _, 
str_arr_long = self.get_data_variation(\n orig_arr, str_arr_long, True, contig)\n\n cast._simple_strided_call((other_arr, str_arr))\n\n cast._simple_strided_call((other_arr, str_arr_short))\n assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)\n\n cast._simple_strided_call((other_arr, str_arr_long))\n assert_array_equal(str_arr, str_arr_long)\n\n if other_dt.kind == \"b\":\n # Booleans do not roundtrip\n continue\n\n other_arr[...] = 0\n cast_back._simple_strided_call((str_arr, other_arr))\n assert_array_equal(orig_arr, other_arr)\n\n other_arr[...] = 0\n cast_back._simple_strided_call((str_arr_long, other_arr))\n assert_array_equal(orig_arr, other_arr)\n\n @pytest.mark.parametrize(\"other_dt\", [\"S8\", \"<U8\", \">U8\"])\n @pytest.mark.parametrize(\"string_char\", [\"S\", \"U\"])\n def test_string_to_string_cancast(self, other_dt, string_char):\n other_dt = np.dtype(other_dt)\n\n fact = 1 if string_char == \"S\" else 4\n div = 1 if other_dt.char == \"S\" else 4\n\n string_DT = type(np.dtype(string_char))\n cast = get_castingimpl(type(other_dt), string_DT)\n\n expected_length = other_dt.itemsize // div\n string_dt = np.dtype(f\"{string_char}{expected_length}\")\n\n safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))\n assert res_dt.itemsize == expected_length * fact\n assert isinstance(res_dt, string_DT)\n\n if other_dt.char == string_char:\n if other_dt.isnative:\n expected_safety = Casting.no | Casting.cast_is_view\n else:\n expected_safety = Casting.equiv\n elif string_char == \"U\":\n expected_safety = Casting.safe\n else:\n expected_safety = Casting.unsafe\n\n assert expected_safety == safety\n\n for change_length in [-1, 0, 1]:\n to_dt = self.string_with_modified_length(string_dt, change_length)\n safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))\n\n assert res_dt is to_dt\n if expected_safety == Casting.unsafe:\n assert safety == expected_safety\n elif change_length < 0:\n assert safety == Casting.same_kind\n elif change_length == 0:\n assert safety == expected_safety\n elif change_length > 0:\n assert safety == Casting.safe\n\n @pytest.mark.parametrize(\"order1\", [\">\", \"<\"])\n @pytest.mark.parametrize(\"order2\", [\">\", \"<\"])\n def test_unicode_byteswapped_cast(self, order1, order2):\n # Very specific tests (not using the castingimpl directly)\n # that tests unicode bytedwaps including for unaligned array data.\n dtype1 = np.dtype(f\"{order1}U30\")\n dtype2 = np.dtype(f\"{order2}U30\")\n data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)\n data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)\n if dtype1.alignment != 1:\n # alignment should always be >1, but skip the check if not\n assert not data1.flags.aligned\n assert not data2.flags.aligned\n\n element = \"this is a ünicode string‽\"\n data1[()] = element\n # Test both `data1` and `data1.copy()` (which should be aligned)\n for data in [data1, data1.copy()]:\n data2[...] 
= data1\n assert data2[()] == element\n assert data2.copy()[()] == element\n\n def test_void_to_string_special_case(self):\n # Cover a small special case in void to string casting that could\n # probably just as well be turned into an error (compare\n # `test_object_to_parametric_internal_error` below).\n assert np.array([], dtype=\"V5\").astype(\"S\").dtype.itemsize == 5\n assert np.array([], dtype=\"V5\").astype(\"U\").dtype.itemsize == 4 * 5\n\n def test_object_to_parametric_internal_error(self):\n # We reject casting from object to a parametric type, without\n # figuring out the correct instance first.\n object_dtype = type(np.dtype(object))\n other_dtype = type(np.dtype(str))\n cast = get_castingimpl(object_dtype, other_dtype)\n with pytest.raises(TypeError,\n match=\"casting from object to the parametric DType\"):\n cast._resolve_descriptors((np.dtype(\"O\"), None))\n\n @pytest.mark.parametrize(\"casting\", [\"no\", \"unsafe\"])\n def test_void_and_structured_with_subarray(self, casting):\n # test case corresponding to gh-19325\n dtype = np.dtype([(\"foo\", \"<f4\", (3, 2))])\n expected = casting == \"unsafe\"\n assert np.can_cast(\"V4\", dtype, casting=casting) == expected\n assert np.can_cast(dtype, \"V4\", casting=casting) == expected\n" ]
[ [ "numpy.can_cast", "numpy.empty_like", "numpy.dtype", "numpy.datetime64", "numpy.testing.assert_array_equal", "numpy.core._multiarray_umath._get_castingimpl", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
malovannaya-lab/gpgrouper
[ "45cb948bfa9ed256e450ad8f257ec24324f786ca" ]
[ "gpgrouper/containers.py" ]
[ "\"\"\"Container for each experiment, has a dataframe and metadata\"\"\"\nimport os\nimport re\nfrom datetime import datetime\nimport traceback\n\nimport pandas as pd\n\n\nfrom . import _version\n\nclass UserData:\n\n def __init__(self, recno=None, datafile=None, runno=1, searchno=1, no_taxa_redistrib=0,\n addedby='', indir = '.', outdir='.', rawfiledir='.',\n labeltype='none', quant_source=None, phospho=False,\n searchdb=None, taxonid=None, miscuts=2):\n if recno is None:\n raise ValueError('Must supply record number (recno)')\n self.recno = recno\n self.runno = runno\n self.searchno = searchno\n self.taxonid = taxonid\n self.added_by = addedby\n self.labeltype = labeltype\n self.no_taxa_redistrib = no_taxa_redistrib\n self.filtervalues = dict()\n self.indir = indir\n self.outdir = outdir\n self.rawfiledir = rawfiledir\n self.searchdb = searchdb # file name for refseq\n self.datafile = datafile\n self.df = pd.DataFrame()\n self.pipeline = None\n self.original_columns = None\n\n rrs = '{}_{}_{}_'.format(recno, runno, searchno)\n basename = os.path.splitext(os.path.basename(datafile))[0]\n self.basename = basename.split(rrs)[-1]\n\n self.LOGFILE = os.path.join(outdir, self.output_name(ext='log'))\n self._LOGSTACK = list()\n self.EXIT_CODE = 0\n self.ERROR = None\n self.taxon_ratio_totals = dict()\n self.miscuts = miscuts\n self.phospho = phospho\n\n with open(self.LOGFILE, 'w') as f:\n f.write('{} PyGrouper {}'.format(datetime.now(), _version.__version__))\n\n @property\n def taxon_miscut_id(self):\n return hash(self.taxonid) + hash(self.miscuts)\n\n\n def __repr__(self):\n return '{}_{}_{}'.format(self.recno, self.runno, self.searchno)\n\n def __bool__(self):\n if self.datafile is not None and self.recno is not None:\n return True\n return False\n\n def to_log(self, message):\n if self._LOGSTACK: # flush\n messages = self._LOGSTACK + (messages,)\n else:\n messages = (message,)\n with open(self.LOGFILE, 'w+') as f:\n for message in messages:\n f.write(message)\n # f.write(sep)\n f.write('\\n')\n\n def to_logq(self, message):\n self._LOGSTACK.append(message+'\\n')\n return self\n\n def flush_log(self):\n if self._LOGSTACK:\n stack, self._LOGSTACK = self._LOGSTACK, list()\n self.to_log('\\n'.join(stack))\n return self\n\n\n def full_path(self, in_or_out='in'):\n \"\"\"returns data file with given path\"\"\"\n if in_or_out == 'in':\n mydir = self.indir\n elif in_or_out == 'out':\n mydir = self.outdir\n else:\n mydir = '.'\n return os.path.join(mydir, self.datafile or '')\n\n def read_csv(self, *args, **kwargs):\n \"\"\"Uses pandas read_csv function to read an input file\n args and kwargs are passed to this function\"\"\"\n try:\n self.df = pd.read_csv(self.full_path(), *args, **kwargs)\n self.original_columns = self.df.columns.values\n except Exception as e:\n # self.to_log(''.join(traceback.format_exc()))\n self.to_log(traceback.format_exc())\n self.ERROR = traceback.format_exc()\n self.EXIT_CODE = 1\n return 1\n if len(self.df) == 0:\n self.EXIT_CODE = 1\n return 2\n return 0\n\n def output_name(self, suffix=None, ext='tab'):\n \"\"\"generate an appropriate output file name\n returns rec_run_search_labeltype_filetype.tab\"\"\"\n # suffix = '_'.join([str(ix) for ix in suffix])\n return '{!r}_{}_{}{}.{}'.format(self,\n self.labeltype,\n self.basename,\n '_' + suffix if suffix else '',\n ext\n )\n\n def populate_base_data(self):\n \"\"\"Populate dataframe with base data prior to grouping\"\"\"\n\n self.categorical_assign('EXPRecNo', self.recno)\n self.categorical_assign('EXPRunNo', 
self.runno)\n        self.categorical_assign('EXPSearchNo', self.searchno)\n        self.categorical_assign('CreationTS', datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\"))\n        self.categorical_assign('AddedBy', self.added_by)\n        # self.categorical_assign('metadatainfo', '') # not sure if this is okay\n\n        # self.df['EXPRecNo'] = self._categorical_assign(self.recno)\n        # self.df['EXPRunNo'] = self._categorical_assign(self.runno)\n        # self.df['EXPSearchNo'] = self._categorical_assign(self.searchno)\n        # self.df['CreationTS'] = self._categorical_assign(datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\"))\n        # self.df['AddedBy'] = self._categorical_assign(self.added_by)\n\n        # self.df['psm_EXPTechRepNo'] = self.techrepno\n        # self.df['psm_TaxonID'] = self.taxonid\n        # self.df['psm_GeneList'] = ''\n        # self.df['psm_ProteinList'] = ''\n        # self.df['psm_GeneCount'] = 0\n        # self.df['psm_ProteinCount'] = 0\n        # self.df['psm_HomologeneID'] = ''\n        # self.df['psm_ProteinCapacity'] = ''\n        # self.df['metadatainfo'] = [tuple()] * len(self.df)\n        self.df['metadatainfo'] = ''\n        if 'ion_score_bins' not in self.filtervalues:\n            self.filtervalues['ion_score_bins'] = (10, 20, 30)\n        return self\n\n    @property\n    def filterstamp(self):\n        s = 'is{ion_score}_qv{qvalue}_pep{pep}_idg{idg}_z{zmin}to{zmax}_mo{modi}_is_bins{ion_score_bins}'.format(**self.filtervalues)\n        if self.phospho:\n            s += '_phospho_only'\n        return s\n\n    def categorical_assign(self, name, value, **kwargs):\n        \"\"\"\n        Assign a static value to a new column.\n        Saves memory by using pandas Categorical dtype.\n        :kwargs: passed to pd.Series.astype\n        \"\"\"\n        self.df[name] = value\n        self.df[name] = self.df[name].astype('category', **kwargs)\n        return self\n" ]
[ [ "pandas.DataFrame" ] ]
Chibi-Shem/Hacktoberfest2020-Expert
[ "324843464aec039e130e85a16e74b76d310f1497" ]
[ "Python Programs/The-Imvisible-Man/opcv.py" ]
[ "import numpy as np\nimport cv2\nimport time\n\ncap = cv2.VideoCapture(0)\ntime.sleep(2)\nbackground=0\n\n#capture the background\nfor i in range(30):\n ret,background = cap.read()\n\nwhile(cap.isOpened()):\n ret , img = cap.read()\n\n if not ret:\n break\n\n hsv = cv2.cvtColor(img , cv2.COLOR_BGR2HSV)\n\n lower_red = np.array([0,120,70])\n upper_red = np.array([10, 255, 255])\n mask1 = cv2.inRange(hsv , lower_red , upper_red) #sepatreting the clock part\n\n lower_red = np.array([170, 120, 70])\n upper_red = np.array([180, 255, 255])\n mask2 = cv2.inRange(hsv, lower_red, upper_red) # sepatreting the clock part\n\n mask1 = mask1+mask2 #OR 1 or x\n\n mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN,\n np.ones((3,3),np.uint8),iterations=2) #Noise Removal\n mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE,\n np.ones((3,3), np.uint8), iterations=1) #smmoting the image\n\n mask2 = cv2.bitwise_not(mask1) #Except the clock\n\n res1=cv2.bitwise_and(background , background , mask = mask1)\n res2 = cv2.bitwise_and(img , img , mask = mask2)\n\n final_output = cv2.addWeighted(res1,1,res2,1,0)\n\n cv2.imshow(\"Hey invisible..!\", final_output)\n k = cv2.waitKey(10)\n if k == ord('s'):\n break\n\ncap.release()\ncv2.destroyAllWindows()" ]
[ [ "numpy.array", "numpy.ones" ] ]
scilicet64/keras-spp
[ "23da20561fe92c585208af9bf3e0ef8f51bc5dcc" ]
[ "tests/test_roi_pooling.py" ]
[ "import keras.backend as K\nimport numpy as np\nfrom keras.layers import Input\nfrom keras.models import Model\n\nfrom spp.RoiPooling import RoiPooling\n\ndim_ordering = K.image_data_format()\nassert dim_ordering in {'channels_last','channels_first'}, 'dim_ordering must be in {channels_last,channels_first}'\n\npooling_regions = [1, 2, 4]\nnum_rois = 2\nnum_channels = 3\n\nif dim_ordering == 'channels_last':\n in_img = Input(shape=(None, None, num_channels))\nelif dim_ordering == 'channels_first':\n in_img = Input(shape=(num_channels, None, None))\n\nin_roi = Input(shape=(num_rois, 4))\n\nout_roi_pool = RoiPooling(pooling_regions, num_rois)([in_img, in_roi])\n\nmodel = Model([in_img, in_roi], out_roi_pool)\nmodel.summary()\n\nmodel.compile(loss='mse', optimizer='sgd')\n\nfor img_size in [8, 16, 32]:\n\n if dim_ordering == 'channels_first':\n X_img = np.random.rand(1, num_channels, img_size, img_size)\n row_length = [float(X_img.shape[2]) / i for i in pooling_regions]\n col_length = [float(X_img.shape[3]) / i for i in pooling_regions]\n elif dim_ordering == 'channels_last':\n X_img = np.random.rand(1, img_size, img_size, num_channels)\n row_length = [float(X_img.shape[1]) / i for i in pooling_regions]\n col_length = [float(X_img.shape[2]) / i for i in pooling_regions]\n\n X_roi = np.array([[0, 0, img_size / 1, img_size / 1],\n [0, 0, img_size / 2, img_size / 2]])\n\n X_roi = np.reshape(X_roi, (1, num_rois, 4)).astype(int)\n\n Y = model.predict([X_img, X_roi])\n\n for roi in range(num_rois):\n\n if dim_ordering == 'channels_first':\n X_curr = X_img[0, :, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3]]\n row_length = [float(X_curr.shape[1]) / i for i in pooling_regions]\n col_length = [float(X_curr.shape[2]) / i for i in pooling_regions]\n elif dim_ordering == 'channels_last':\n X_curr = X_img[0, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3], :]\n row_length = [float(X_curr.shape[0]) / i for i in pooling_regions]\n col_length = [float(X_curr.shape[1]) / i for i in pooling_regions]\n\n idx = 0\n\n for pool_num, num_pool_regions in enumerate(pooling_regions):\n for ix in range(num_pool_regions):\n for jy in range(num_pool_regions):\n for cn in range(num_channels):\n\n x1 = int(round(ix * col_length[pool_num]))\n x2 = int(round(ix * col_length[pool_num] + col_length[pool_num]))\n y1 = int(round(jy * row_length[pool_num]))\n y2 = int(round(jy * row_length[pool_num] + row_length[pool_num]))\n\n if dim_ordering == 'channels_first':\n m_val = np.max(X_curr[cn, y1:y2, x1:x2])\n elif dim_ordering == 'channels_last':\n m_val = np.max(X_curr[y1:y2, x1:x2, cn])\n\n np.testing.assert_almost_equal(\n m_val, Y[0, roi, idx], decimal=6)\n idx += 1\n \nprint('Passed roi pooling test')" ]
[ [ "numpy.reshape", "numpy.testing.assert_almost_equal", "numpy.max", "numpy.random.rand", "numpy.array" ] ]
akashkj/superset
[ "8a157d8446780e4e71550405cbedde8a4d64d92a" ]
[ "tests/integration_tests/core_tests.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# isort:skip_file\n\"\"\"Unit tests for Superset\"\"\"\nimport csv\nimport datetime\nimport doctest\nimport html\nimport io\nimport json\nimport logging\nfrom typing import Dict, List\nfrom urllib.parse import quote\n\nimport superset.utils.database\nfrom tests.integration_tests.fixtures.birth_names_dashboard import (\n load_birth_names_dashboard_with_slices,\n load_birth_names_data,\n)\n\nimport pytest\nimport pytz\nimport random\nimport re\nimport unittest\nfrom unittest import mock\n\nimport pandas as pd\nimport sqlalchemy as sqla\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom superset.models.cache import CacheKey\nfrom superset.utils.database import get_example_database\nfrom tests.integration_tests.conftest import with_feature_flags\nfrom tests.integration_tests.fixtures.energy_dashboard import (\n load_energy_table_with_slice,\n load_energy_table_data,\n)\nfrom tests.integration_tests.test_app import app\nimport superset.views.utils\nfrom superset import (\n dataframe,\n db,\n security_manager,\n sql_lab,\n)\nfrom superset.common.db_query_status import QueryStatus\nfrom superset.connectors.sqla.models import SqlaTable\nfrom superset.db_engine_specs.base import BaseEngineSpec\nfrom superset.db_engine_specs.mssql import MssqlEngineSpec\nfrom superset.exceptions import SupersetException\nfrom superset.extensions import async_query_manager\nfrom superset.models import core as models\nfrom superset.models.annotations import Annotation, AnnotationLayer\nfrom superset.models.dashboard import Dashboard\nfrom superset.models.datasource_access_request import DatasourceAccessRequest\nfrom superset.models.slice import Slice\nfrom superset.models.sql_lab import Query\nfrom superset.result_set import SupersetResultSet\nfrom superset.utils import core as utils\nfrom superset.views import core as views\nfrom superset.views.database.views import DatabaseView\n\nfrom .base_tests import SupersetTestCase\nfrom tests.integration_tests.fixtures.world_bank_dashboard import (\n load_world_bank_dashboard_with_slices,\n load_world_bank_data,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestCore(SupersetTestCase):\n def setUp(self):\n db.session.query(Query).delete()\n db.session.query(DatasourceAccessRequest).delete()\n db.session.query(models.Log).delete()\n self.table_ids = {\n tbl.table_name: tbl.id for tbl in (db.session.query(SqlaTable).all())\n }\n self.original_unsafe_db_setting = app.config[\"PREVENT_UNSAFE_DB_CONNECTIONS\"]\n\n def tearDown(self):\n db.session.query(Query).delete()\n app.config[\"PREVENT_UNSAFE_DB_CONNECTIONS\"] = self.original_unsafe_db_setting\n\n def test_login(self):\n resp = self.get_resp(\"/login/\", data=dict(username=\"admin\", password=\"general\"))\n self.assertNotIn(\"User 
confirmation needed\", resp)\n\n resp = self.get_resp(\"/logout/\", follow_redirects=True)\n self.assertIn(\"User confirmation needed\", resp)\n\n resp = self.get_resp(\n \"/login/\", data=dict(username=\"admin\", password=\"wrongPassword\")\n )\n self.assertIn(\"User confirmation needed\", resp)\n\n def test_dashboard_endpoint(self):\n self.login()\n resp = self.client.get(\"/superset/dashboard/-1/\")\n assert resp.status_code == 404\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_slice_endpoint(self):\n self.login(username=\"admin\")\n slc = self.get_slice(\"Girls\", db.session)\n resp = self.get_resp(\"/superset/slice/{}/\".format(slc.id))\n assert \"Original value\" in resp\n assert \"List Roles\" in resp\n\n # Testing overrides\n resp = self.get_resp(\"/superset/slice/{}/?standalone=true\".format(slc.id))\n assert '<div class=\"navbar' not in resp\n\n resp = self.client.get(\"/superset/slice/-1/\")\n assert resp.status_code == 404\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_viz_cache_key(self):\n self.login(username=\"admin\")\n slc = self.get_slice(\"Girls\", db.session)\n\n viz = slc.viz\n qobj = viz.query_obj()\n cache_key = viz.cache_key(qobj)\n\n qobj[\"groupby\"] = []\n cache_key_with_groupby = viz.cache_key(qobj)\n self.assertNotEqual(cache_key, cache_key_with_groupby)\n\n self.assertNotEqual(\n viz.cache_key(qobj), viz.cache_key(qobj, time_compare=\"12 weeks\")\n )\n\n self.assertNotEqual(\n viz.cache_key(qobj, time_compare=\"28 days\"),\n viz.cache_key(qobj, time_compare=\"12 weeks\"),\n )\n\n qobj[\"inner_from_dttm\"] = datetime.datetime(1901, 1, 1)\n\n self.assertEqual(cache_key_with_groupby, viz.cache_key(qobj))\n\n def test_get_superset_tables_not_allowed(self):\n example_db = superset.utils.database.get_example_database()\n schema_name = self.default_schema_backend_map[example_db.backend]\n self.login(username=\"gamma\")\n uri = f\"superset/tables/{example_db.id}/{schema_name}/undefined/\"\n rv = self.client.get(uri)\n self.assertEqual(rv.status_code, 404)\n\n def test_get_superset_tables_substr(self):\n example_db = superset.utils.database.get_example_database()\n if example_db.backend in {\"presto\", \"hive\"}:\n # TODO: change table to the real table that is in examples.\n return\n self.login(username=\"admin\")\n schema_name = self.default_schema_backend_map[example_db.backend]\n uri = f\"superset/tables/{example_db.id}/{schema_name}/ab_role/\"\n rv = self.client.get(uri)\n response = json.loads(rv.data.decode(\"utf-8\"))\n self.assertEqual(rv.status_code, 200)\n\n expected_response = {\n \"options\": [\n {\n \"label\": \"ab_role\",\n \"schema\": schema_name,\n \"title\": \"ab_role\",\n \"type\": \"table\",\n \"value\": \"ab_role\",\n \"extra\": None,\n }\n ],\n \"tableLength\": 1,\n }\n self.assertEqual(response, expected_response)\n\n def test_get_superset_tables_not_found(self):\n self.login(username=\"admin\")\n uri = f\"superset/tables/invalid/public/undefined/\"\n rv = self.client.get(uri)\n self.assertEqual(rv.status_code, 404)\n\n def test_annotation_json_endpoint(self):\n # Set up an annotation layer and annotation\n layer = AnnotationLayer(name=\"foo\", descr=\"bar\")\n db.session.add(layer)\n db.session.commit()\n\n annotation = Annotation(\n layer_id=layer.id,\n short_descr=\"my_annotation\",\n start_dttm=datetime.datetime(2020, 5, 20, 18, 21, 51),\n end_dttm=datetime.datetime(2020, 5, 20, 18, 31, 51),\n )\n\n db.session.add(annotation)\n db.session.commit()\n\n self.login()\n 
resp_annotations = json.loads(\n self.get_resp(\"annotationlayermodelview/api/read\")\n )\n # the UI needs id and name to function\n self.assertIn(\"id\", resp_annotations[\"result\"][0])\n self.assertIn(\"name\", resp_annotations[\"result\"][0])\n\n response = self.get_resp(\n f\"/superset/annotation_json/{layer.id}?form_data=\"\n + quote(json.dumps({\"time_range\": \"100 years ago : now\"}))\n )\n assert \"my_annotation\" in response\n\n # Rollback changes\n db.session.delete(annotation)\n db.session.delete(layer)\n db.session.commit()\n\n def test_admin_only_permissions(self):\n def assert_admin_permission_in(role_name, assert_func):\n role = security_manager.find_role(role_name)\n permissions = [p.permission.name for p in role.permissions]\n assert_func(\"can_sync_druid_source\", permissions)\n assert_func(\"can_approve\", permissions)\n\n assert_admin_permission_in(\"Admin\", self.assertIn)\n assert_admin_permission_in(\"Alpha\", self.assertNotIn)\n assert_admin_permission_in(\"Gamma\", self.assertNotIn)\n\n def test_admin_only_menu_views(self):\n def assert_admin_view_menus_in(role_name, assert_func):\n role = security_manager.find_role(role_name)\n view_menus = [p.view_menu.name for p in role.permissions]\n assert_func(\"ResetPasswordView\", view_menus)\n assert_func(\"RoleModelView\", view_menus)\n assert_func(\"Security\", view_menus)\n assert_func(\"SQL Lab\", view_menus)\n\n assert_admin_view_menus_in(\"Admin\", self.assertIn)\n assert_admin_view_menus_in(\"Alpha\", self.assertNotIn)\n assert_admin_view_menus_in(\"Gamma\", self.assertNotIn)\n\n @pytest.mark.usefixtures(\"load_energy_table_with_slice\")\n def test_save_slice(self):\n self.login(username=\"admin\")\n slice_name = f\"Energy Sankey\"\n slice_id = self.get_slice(slice_name, db.session).id\n copy_name_prefix = \"Test Sankey\"\n copy_name = f\"{copy_name_prefix}[save]{random.random()}\"\n tbl_id = self.table_ids.get(\"energy_usage\")\n new_slice_name = f\"{copy_name_prefix}[overwrite]{random.random()}\"\n\n url = (\n \"/superset/explore/table/{}/?slice_name={}&\"\n \"action={}&datasource_name=energy_usage\"\n )\n\n form_data = {\n \"adhoc_filters\": [],\n \"viz_type\": \"sankey\",\n \"groupby\": [\"target\"],\n \"metric\": \"sum__value\",\n \"row_limit\": 5000,\n \"slice_id\": slice_id,\n \"time_range_endpoints\": [\"inclusive\", \"exclusive\"],\n }\n # Changing name and save as a new slice\n resp = self.client.post(\n url.format(tbl_id, copy_name, \"saveas\"),\n data={\"form_data\": json.dumps(form_data)},\n )\n db.session.expunge_all()\n new_slice_id = resp.json[\"form_data\"][\"slice_id\"]\n slc = db.session.query(Slice).filter_by(id=new_slice_id).one()\n\n self.assertEqual(slc.slice_name, copy_name)\n form_data.pop(\"slice_id\") # We don't save the slice id when saving as\n self.assertEqual(slc.viz.form_data, form_data)\n\n form_data = {\n \"adhoc_filters\": [],\n \"viz_type\": \"sankey\",\n \"groupby\": [\"source\"],\n \"metric\": \"sum__value\",\n \"row_limit\": 5000,\n \"slice_id\": new_slice_id,\n \"time_range\": \"now\",\n \"time_range_endpoints\": [\"inclusive\", \"exclusive\"],\n }\n # Setting the name back to its original name by overwriting new slice\n self.client.post(\n url.format(tbl_id, new_slice_name, \"overwrite\"),\n data={\"form_data\": json.dumps(form_data)},\n )\n db.session.expunge_all()\n slc = db.session.query(Slice).filter_by(id=new_slice_id).one()\n self.assertEqual(slc.slice_name, new_slice_name)\n self.assertEqual(slc.viz.form_data, form_data)\n\n # Cleanup\n slices = (\n 
db.session.query(Slice)\n .filter(Slice.slice_name.like(copy_name_prefix + \"%\"))\n .all()\n )\n for slc in slices:\n db.session.delete(slc)\n db.session.commit()\n\n @pytest.mark.usefixtures(\"load_energy_table_with_slice\")\n def test_filter_endpoint(self):\n self.login(username=\"admin\")\n slice_name = \"Energy Sankey\"\n slice_id = self.get_slice(slice_name, db.session).id\n db.session.commit()\n tbl_id = self.table_ids.get(\"energy_usage\")\n table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id)\n table.filter_select_enabled = True\n url = (\n \"/superset/filter/table/{}/target/?viz_type=sankey&groupby=source\"\n \"&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&\"\n \"slice_id={}&datasource_name=energy_usage&\"\n \"datasource_id=1&datasource_type=table\"\n )\n\n # Changing name\n resp = self.get_resp(url.format(tbl_id, slice_id))\n assert len(resp) > 0\n assert \"energy_target0\" in resp\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_slice_data(self):\n # slice data should have some required attributes\n self.login(username=\"admin\")\n slc = self.get_slice(\n slice_name=\"Girls\", session=db.session, expunge_from_session=False\n )\n slc_data_attributes = slc.data.keys()\n assert \"changed_on\" in slc_data_attributes\n assert \"modified\" in slc_data_attributes\n assert \"owners\" in slc_data_attributes\n\n @pytest.mark.usefixtures(\"load_energy_table_with_slice\")\n def test_slices(self):\n # Testing by hitting the two supported end points for all slices\n self.login(username=\"admin\")\n Slc = Slice\n urls = []\n for slc in db.session.query(Slc).all():\n urls += [\n (slc.slice_name, \"explore\", slc.slice_url),\n ]\n for name, method, url in urls:\n logger.info(f\"[{name}]/[{method}]: {url}\")\n print(f\"[{name}]/[{method}]: {url}\")\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200)\n\n def test_tablemodelview_list(self):\n self.login(username=\"admin\")\n\n url = \"/tablemodelview/list/\"\n resp = self.get_resp(url)\n\n # assert that a table is listed\n table = db.session.query(SqlaTable).first()\n assert table.name in resp\n assert \"/superset/explore/table/{}\".format(table.id) in resp\n\n def test_add_slice(self):\n self.login(username=\"admin\")\n # assert that /chart/add responds with 200\n url = \"/chart/add\"\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200)\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_get_user_slices_for_owners(self):\n self.login(username=\"alpha\")\n user = security_manager.find_user(\"alpha\")\n slice_name = \"Girls\"\n\n # ensure user is not owner of any slices\n url = f\"/superset/user_slices/{user.id}/\"\n resp = self.client.get(url)\n data = json.loads(resp.data)\n self.assertEqual(data, [])\n\n # make user owner of slice and verify that endpoint returns said slice\n slc = self.get_slice(\n slice_name=slice_name, session=db.session, expunge_from_session=False\n )\n slc.owners = [user]\n db.session.merge(slc)\n db.session.commit()\n url = f\"/superset/user_slices/{user.id}/\"\n resp = self.client.get(url)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][\"title\"], slice_name)\n\n # remove ownership and ensure user no longer gets slice\n slc = self.get_slice(\n slice_name=slice_name, session=db.session, expunge_from_session=False\n )\n slc.owners = []\n db.session.merge(slc)\n db.session.commit()\n url = f\"/superset/user_slices/{user.id}/\"\n resp = 
self.client.get(url)\n data = json.loads(resp.data)\n self.assertEqual(data, [])\n\n def test_get_user_slices(self):\n self.login(username=\"admin\")\n userid = security_manager.find_user(\"admin\").id\n url = f\"/sliceasync/api/read?_flt_0_created_by={userid}\"\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200)\n\n @pytest.mark.usefixtures(\"load_energy_table_with_slice\")\n def test_slices_V2(self):\n # Add explore-v2-beta role to admin user\n # Test all slice urls as user with with explore-v2-beta role\n security_manager.add_role(\"explore-v2-beta\")\n\n security_manager.add_user(\n \"explore_beta\",\n \"explore_beta\",\n \" user\",\n \"[email protected]\",\n security_manager.find_role(\"explore-v2-beta\"),\n password=\"general\",\n )\n self.login(username=\"explore_beta\", password=\"general\")\n\n Slc = Slice\n urls = []\n for slc in db.session.query(Slc).all():\n urls += [(slc.slice_name, \"slice_url\", slc.slice_url)]\n for name, method, url in urls:\n print(f\"[{name}]/[{method}]: {url}\")\n self.client.get(url)\n\n def test_doctests(self):\n modules = [utils, models, sql_lab]\n for mod in modules:\n failed, tests = doctest.testmod(mod)\n if failed:\n raise Exception(\"Failed a doctest\")\n\n def test_misc(self):\n assert self.get_resp(\"/health\") == \"OK\"\n assert self.get_resp(\"/healthcheck\") == \"OK\"\n assert self.get_resp(\"/ping\") == \"OK\"\n\n def test_testconn(self, username=\"admin\"):\n # need to temporarily allow sqlite dbs, teardown will undo this\n app.config[\"PREVENT_UNSAFE_DB_CONNECTIONS\"] = False\n self.login(username=username)\n database = superset.utils.database.get_example_database()\n # validate that the endpoint works with the password-masked sqlalchemy uri\n data = json.dumps(\n {\n \"uri\": database.safe_sqlalchemy_uri(),\n \"name\": \"examples\",\n \"impersonate_user\": False,\n }\n )\n response = self.client.post(\n \"/superset/testconn\", data=data, content_type=\"application/json\"\n )\n assert response.status_code == 200\n assert response.headers[\"Content-Type\"] == \"application/json\"\n\n # validate that the endpoint works with the decrypted sqlalchemy uri\n data = json.dumps(\n {\n \"uri\": database.sqlalchemy_uri_decrypted,\n \"name\": \"examples\",\n \"impersonate_user\": False,\n }\n )\n response = self.client.post(\n \"/superset/testconn\", data=data, content_type=\"application/json\"\n )\n assert response.status_code == 200\n assert response.headers[\"Content-Type\"] == \"application/json\"\n\n def test_testconn_failed_conn(self, username=\"admin\"):\n self.login(username=username)\n\n data = json.dumps(\n {\"uri\": \"broken://url\", \"name\": \"examples\", \"impersonate_user\": False}\n )\n response = self.client.post(\n \"/superset/testconn\", data=data, content_type=\"application/json\"\n )\n assert response.status_code == 400\n assert response.headers[\"Content-Type\"] == \"application/json\"\n response_body = json.loads(response.data.decode(\"utf-8\"))\n expected_body = {\"error\": \"Could not load database driver: broken\"}\n assert response_body == expected_body, \"%s != %s\" % (\n response_body,\n expected_body,\n )\n\n data = json.dumps(\n {\n \"uri\": \"mssql+pymssql://url\",\n \"name\": \"examples\",\n \"impersonate_user\": False,\n }\n )\n response = self.client.post(\n \"/superset/testconn\", data=data, content_type=\"application/json\"\n )\n assert response.status_code == 400\n assert response.headers[\"Content-Type\"] == \"application/json\"\n response_body = 
json.loads(response.data.decode(\"utf-8\"))\n expected_body = {\"error\": \"Could not load database driver: mssql+pymssql\"}\n assert response_body == expected_body, \"%s != %s\" % (\n response_body,\n expected_body,\n )\n\n def test_testconn_unsafe_uri(self, username=\"admin\"):\n self.login(username=username)\n app.config[\"PREVENT_UNSAFE_DB_CONNECTIONS\"] = True\n\n response = self.client.post(\n \"/superset/testconn\",\n data=json.dumps(\n {\n \"uri\": \"sqlite:///home/superset/unsafe.db\",\n \"name\": \"unsafe\",\n \"impersonate_user\": False,\n }\n ),\n content_type=\"application/json\",\n )\n self.assertEqual(400, response.status_code)\n response_body = json.loads(response.data.decode(\"utf-8\"))\n expected_body = {\n \"error\": \"SQLiteDialect_pysqlite cannot be used as a data source for security reasons.\"\n }\n self.assertEqual(expected_body, response_body)\n\n def test_custom_password_store(self):\n database = superset.utils.database.get_example_database()\n conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)\n\n def custom_password_store(uri):\n return \"password_store_test\"\n\n models.custom_password_store = custom_password_store\n conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)\n if conn_pre.password:\n assert conn.password == \"password_store_test\"\n assert conn.password != conn_pre.password\n # Disable for password store for later tests\n models.custom_password_store = None\n\n def test_databaseview_edit(self, username=\"admin\"):\n # validate that sending a password-masked uri does not over-write the decrypted\n # uri\n self.login(username=username)\n database = superset.utils.database.get_example_database()\n sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted\n url = \"databaseview/edit/{}\".format(database.id)\n data = {k: database.__getattribute__(k) for k in DatabaseView.add_columns}\n data[\"sqlalchemy_uri\"] = database.safe_sqlalchemy_uri()\n self.client.post(url, data=data)\n database = superset.utils.database.get_example_database()\n self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)\n\n # Need to clean up after ourselves\n database.impersonate_user = False\n database.allow_dml = False\n database.allow_run_async = False\n db.session.commit()\n\n @pytest.mark.usefixtures(\n \"load_energy_table_with_slice\", \"load_birth_names_dashboard_with_slices\"\n )\n def test_warm_up_cache(self):\n self.login()\n slc = self.get_slice(\"Girls\", db.session)\n data = self.get_json_resp(\"/superset/warm_up_cache?slice_id={}\".format(slc.id))\n self.assertEqual(\n data, [{\"slice_id\": slc.id, \"viz_error\": None, \"viz_status\": \"success\"}]\n )\n\n data = self.get_json_resp(\n \"/superset/warm_up_cache?table_name=energy_usage&db_name=main\"\n )\n assert len(data) > 0\n\n dashboard = self.get_dash_by_slug(\"births\")\n\n assert self.get_json_resp(\n f\"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}\"\n ) == [{\"slice_id\": slc.id, \"viz_error\": None, \"viz_status\": \"success\"}]\n\n assert self.get_json_resp(\n f\"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}&extra_filters=\"\n + quote(json.dumps([{\"col\": \"name\", \"op\": \"in\", \"val\": [\"Jennifer\"]}]))\n ) == [{\"slice_id\": slc.id, \"viz_error\": None, \"viz_status\": \"success\"}]\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_cache_logging(self):\n self.login(\"admin\")\n store_cache_keys = app.config[\"STORE_CACHE_KEYS_IN_METADATA_DB\"]\n 
app.config[\"STORE_CACHE_KEYS_IN_METADATA_DB\"] = True\n girls_slice = self.get_slice(\"Girls\", db.session)\n self.get_json_resp(\"/superset/warm_up_cache?slice_id={}\".format(girls_slice.id))\n ck = db.session.query(CacheKey).order_by(CacheKey.id.desc()).first()\n assert ck.datasource_uid == f\"{girls_slice.table.id}__table\"\n app.config[\"STORE_CACHE_KEYS_IN_METADATA_DB\"] = store_cache_keys\n\n def test_shortner(self):\n self.login(username=\"admin\")\n data = (\n \"//superset/explore/table/1/?viz_type=sankey&groupby=source&\"\n \"groupby=target&metric=sum__value&row_limit=5000&where=&having=&\"\n \"flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name=\"\n \"Energy+Sankey&collapsed_fieldsets=&action=&datasource_name=\"\n \"energy_usage&datasource_id=1&datasource_type=table&\"\n \"previous_viz_type=sankey\"\n )\n resp = self.client.post(\"/r/shortner/\", data=dict(data=data))\n assert re.search(r\"\\/r\\/[0-9]+\", resp.data.decode(\"utf-8\"))\n\n def test_shortner_invalid(self):\n self.login(username=\"admin\")\n invalid_urls = [\n \"hhttp://invalid.com\",\n \"hhttps://invalid.com\",\n \"www.invalid.com\",\n ]\n for invalid_url in invalid_urls:\n resp = self.client.post(\"/r/shortner/\", data=dict(data=invalid_url))\n assert resp.status_code == 400\n\n def test_redirect_invalid(self):\n model_url = models.Url(url=\"hhttp://invalid.com\")\n db.session.add(model_url)\n db.session.commit()\n\n self.login(username=\"admin\")\n response = self.client.get(f\"/r/{model_url.id}\")\n assert response.headers[\"Location\"] == \"http://localhost/\"\n db.session.delete(model_url)\n db.session.commit()\n\n @with_feature_flags(KV_STORE=False)\n def test_kv_disabled(self):\n self.login(username=\"admin\")\n\n resp = self.client.get(\"/kv/10001/\")\n self.assertEqual(404, resp.status_code)\n\n value = json.dumps({\"data\": \"this is a test\"})\n resp = self.client.post(\"/kv/store/\", data=dict(data=value))\n self.assertEqual(resp.status_code, 404)\n\n @with_feature_flags(KV_STORE=True)\n def test_kv_enabled(self):\n self.login(username=\"admin\")\n\n resp = self.client.get(\"/kv/10001/\")\n self.assertEqual(404, resp.status_code)\n\n value = json.dumps({\"data\": \"this is a test\"})\n resp = self.client.post(\"/kv/store/\", data=dict(data=value))\n self.assertEqual(resp.status_code, 200)\n kv = db.session.query(models.KeyValue).first()\n kv_value = kv.value\n self.assertEqual(json.loads(value), json.loads(kv_value))\n\n resp = self.client.get(\"/kv/{}/\".format(kv.id))\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(json.loads(value), json.loads(resp.data.decode(\"utf-8\")))\n\n def test_gamma(self):\n self.login(username=\"gamma\")\n assert \"Charts\" in self.get_resp(\"/chart/list/\")\n assert \"Dashboards\" in self.get_resp(\"/dashboard/list/\")\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_csv_endpoint(self):\n self.login()\n client_id = \"{}\".format(random.getrandbits(64))[:10]\n get_name_sql = \"\"\"\n SELECT name\n FROM birth_names\n LIMIT 1\n \"\"\"\n resp = self.run_sql(get_name_sql, client_id, raise_on_error=True)\n name = resp[\"data\"][0][\"name\"]\n sql = f\"\"\"\n SELECT name\n FROM birth_names\n WHERE name = '{name}'\n LIMIT 1\n \"\"\"\n client_id = \"{}\".format(random.getrandbits(64))[:10]\n self.run_sql(sql, client_id, raise_on_error=True)\n\n resp = self.get_resp(\"/superset/csv/{}\".format(client_id))\n data = csv.reader(io.StringIO(resp))\n expected_data = csv.reader(io.StringIO(f\"name\\n{name}\\n\"))\n\n client_id = 
\"{}\".format(random.getrandbits(64))[:10]\n self.run_sql(sql, client_id, raise_on_error=True)\n\n resp = self.get_resp(\"/superset/csv/{}\".format(client_id))\n data = csv.reader(io.StringIO(resp))\n expected_data = csv.reader(io.StringIO(f\"name\\n{name}\\n\"))\n\n self.assertEqual(list(expected_data), list(data))\n self.logout()\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_extra_table_metadata(self):\n self.login()\n example_db = superset.utils.database.get_example_database()\n schema = \"default\" if example_db.backend in {\"presto\", \"hive\"} else \"superset\"\n self.get_json_resp(\n f\"/superset/extra_table_metadata/{example_db.id}/birth_names/{schema}/\"\n )\n\n def test_templated_sql_json(self):\n if superset.utils.database.get_example_database().backend == \"presto\":\n # TODO: make it work for presto\n return\n self.login()\n sql = \"SELECT '{{ 1+1 }}' as test\"\n data = self.run_sql(sql, \"fdaklj3ws\")\n self.assertEqual(data[\"data\"][0][\"test\"], \"2\")\n\n @mock.patch(\n \"tests.integration_tests.superset_test_custom_template_processors.datetime\"\n )\n @mock.patch(\"superset.views.core.get_sql_results\")\n def test_custom_templated_sql_json(self, sql_lab_mock, mock_dt) -> None:\n \"\"\"Test sqllab receives macros expanded query.\"\"\"\n mock_dt.utcnow = mock.Mock(return_value=datetime.datetime(1970, 1, 1))\n self.login()\n sql = \"SELECT '$DATE()' as test\"\n resp = {\n \"status\": QueryStatus.SUCCESS,\n \"query\": {\"rows\": 1},\n \"data\": [{\"test\": \"'1970-01-01'\"}],\n }\n sql_lab_mock.return_value = resp\n\n dbobj = self.create_fake_db_for_macros()\n json_payload = dict(database_id=dbobj.id, sql=sql)\n self.get_json_resp(\n \"/superset/sql_json/\", raise_on_error=False, json_=json_payload\n )\n assert sql_lab_mock.called\n self.assertEqual(sql_lab_mock.call_args[0][1], \"SELECT '1970-01-01' as test\")\n\n self.delete_fake_db_for_macros()\n\n def test_fetch_datasource_metadata(self):\n self.login(username=\"admin\")\n url = \"/superset/fetch_datasource_metadata?\" \"datasourceKey=1__table\"\n resp = self.get_json_resp(url)\n keys = [\n \"name\",\n \"type\",\n \"order_by_choices\",\n \"granularity_sqla\",\n \"time_grain_sqla\",\n \"id\",\n ]\n for k in keys:\n self.assertIn(k, resp.keys())\n\n @staticmethod\n def _get_user_activity_endpoints(user: str):\n userid = security_manager.find_user(user).id\n return (\n f\"/superset/recent_activity/{userid}/\",\n f\"/superset/created_slices/{userid}/\",\n f\"/superset/created_dashboards/{userid}/\",\n f\"/superset/fave_slices/{userid}/\",\n f\"/superset/fave_dashboards/{userid}/\",\n f\"/superset/user_slices/{userid}/\",\n f\"/superset/fave_dashboards_by_username/{user}/\",\n )\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_user_profile(self, username=\"admin\"):\n self.login(username=username)\n slc = self.get_slice(\"Girls\", db.session)\n\n # Setting some faves\n url = f\"/superset/favstar/Slice/{slc.id}/select/\"\n resp = self.get_json_resp(url)\n self.assertEqual(resp[\"count\"], 1)\n\n dash = db.session.query(Dashboard).filter_by(slug=\"births\").first()\n url = f\"/superset/favstar/Dashboard/{dash.id}/select/\"\n resp = self.get_json_resp(url)\n self.assertEqual(resp[\"count\"], 1)\n\n resp = self.get_resp(f\"/superset/profile/{username}/\")\n self.assertIn('\"app\"', resp)\n\n for endpoint in self._get_user_activity_endpoints(username):\n data = self.get_json_resp(endpoint)\n self.assertNotIn(\"message\", data)\n\n 
@pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_user_activity_access(self, username=\"gamma\"):\n self.login(username=username)\n\n # accessing own and other users' activity is allowed by default\n for user in (\"admin\", \"gamma\"):\n for endpoint in self._get_user_activity_endpoints(user):\n resp = self.client.get(endpoint)\n assert resp.status_code == 200\n\n # disabling flag will block access to other users' activity data\n access_flag = app.config[\"ENABLE_BROAD_ACTIVITY_ACCESS\"]\n app.config[\"ENABLE_BROAD_ACTIVITY_ACCESS\"] = False\n for user in (\"admin\", \"gamma\"):\n for endpoint in self._get_user_activity_endpoints(user):\n resp = self.client.get(endpoint)\n expected_status_code = 200 if user == username else 403\n assert resp.status_code == expected_status_code\n\n # restore flag\n app.config[\"ENABLE_BROAD_ACTIVITY_ACCESS\"] = access_flag\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_slice_id_is_always_logged_correctly_on_web_request(self):\n # superset/explore case\n self.login(\"admin\")\n slc = db.session.query(Slice).filter_by(slice_name=\"Girls\").one()\n qry = db.session.query(models.Log).filter_by(slice_id=slc.id)\n self.get_resp(slc.slice_url, {\"form_data\": json.dumps(slc.form_data)})\n self.assertEqual(1, qry.count())\n\n def create_sample_csvfile(self, filename: str, content: List[str]) -> None:\n with open(filename, \"w+\") as test_file:\n for l in content:\n test_file.write(f\"{l}\\n\")\n\n def create_sample_excelfile(self, filename: str, content: Dict[str, str]) -> None:\n pd.DataFrame(content).to_excel(filename)\n\n def enable_csv_upload(self, database: models.Database) -> None:\n \"\"\"Enables csv upload in the given database.\"\"\"\n database.allow_file_upload = True\n db.session.commit()\n add_datasource_page = self.get_resp(\"/databaseview/list/\")\n self.assertIn(\"Upload a CSV\", add_datasource_page)\n\n form_get = self.get_resp(\"/csvtodatabaseview/form\")\n self.assertIn(\"CSV to Database configuration\", form_get)\n\n def test_dataframe_timezone(self):\n tz = pytz.FixedOffset(60)\n data = [\n (datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),\n (datetime.datetime(2017, 11, 18, 22, 6, 30, tzinfo=tz),),\n ]\n results = SupersetResultSet(list(data), [[\"data\"]], BaseEngineSpec)\n df = results.to_pandas_df()\n data = dataframe.df_to_records(df)\n json_str = json.dumps(data, default=utils.pessimistic_json_iso_dttm_ser)\n self.assertDictEqual(\n data[0], {\"data\": pd.Timestamp(\"2017-11-18 21:53:00.219225+0100\", tz=tz)}\n )\n self.assertDictEqual(\n data[1], {\"data\": pd.Timestamp(\"2017-11-18 22:06:30+0100\", tz=tz)}\n )\n self.assertEqual(\n json_str,\n '[{\"data\": \"2017-11-18T21:53:00.219225+01:00\"}, {\"data\": \"2017-11-18T22:06:30+01:00\"}]',\n )\n\n def test_mssql_engine_spec_pymssql(self):\n # Test for case when tuple is returned (pymssql)\n data = [\n (1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000)),\n (2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)),\n ]\n results = SupersetResultSet(\n list(data), [[\"col1\"], [\"col2\"], [\"col3\"]], MssqlEngineSpec\n )\n df = results.to_pandas_df()\n data = dataframe.df_to_records(df)\n self.assertEqual(len(data), 2)\n self.assertEqual(\n data[0],\n {\"col1\": 1, \"col2\": 1, \"col3\": pd.Timestamp(\"2017-10-19 23:39:16.660000\")},\n )\n\n def test_comments_in_sqlatable_query(self):\n clean_query = \"SELECT '/* val 1 */' as c1, '-- val 2' as c2 FROM tbl\"\n commented_query = \"/* comment 1 */\" + 
clean_query + \"-- comment 2\"\n table = SqlaTable(\n table_name=\"test_comments_in_sqlatable_query_table\",\n sql=commented_query,\n database=get_example_database(),\n )\n rendered_query = str(table.get_from_clause())\n self.assertEqual(clean_query, rendered_query)\n\n def test_slice_payload_no_datasource(self):\n self.login(username=\"admin\")\n data = self.get_json_resp(\"/superset/explore_json/\", raise_on_error=False)\n\n self.assertEqual(\n data[\"errors\"][0][\"message\"],\n \"The dataset associated with this chart no longer exists\",\n )\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_explore_json(self):\n tbl_id = self.table_ids.get(\"birth_names\")\n form_data = {\n \"datasource\": f\"{tbl_id}__table\",\n \"viz_type\": \"dist_bar\",\n \"time_range_endpoints\": [\"inclusive\", \"exclusive\"],\n \"granularity_sqla\": \"ds\",\n \"time_range\": \"No filter\",\n \"metrics\": [\"count\"],\n \"adhoc_filters\": [],\n \"groupby\": [\"gender\"],\n \"row_limit\": 100,\n }\n self.login(username=\"admin\")\n rv = self.client.post(\n \"/superset/explore_json/\", data={\"form_data\": json.dumps(form_data)},\n )\n data = json.loads(rv.data.decode(\"utf-8\"))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(data[\"rowcount\"], 2)\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_explore_json_dist_bar_order(self):\n tbl_id = self.table_ids.get(\"birth_names\")\n form_data = {\n \"datasource\": f\"{tbl_id}__table\",\n \"viz_type\": \"dist_bar\",\n \"url_params\": {},\n \"time_range_endpoints\": [\"inclusive\", \"exclusive\"],\n \"granularity_sqla\": \"ds\",\n \"time_range\": 'DATEADD(DATETIME(\"2021-01-22T00:00:00\"), -100, year) : 2021-01-22T00:00:00',\n \"metrics\": [\n {\n \"expressionType\": \"SIMPLE\",\n \"column\": {\n \"id\": 334,\n \"column_name\": \"name\",\n \"verbose_name\": \"null\",\n \"description\": \"null\",\n \"expression\": \"\",\n \"filterable\": True,\n \"groupby\": True,\n \"is_dttm\": False,\n \"type\": \"VARCHAR(255)\",\n \"python_date_format\": \"null\",\n },\n \"aggregate\": \"COUNT\",\n \"sqlExpression\": \"null\",\n \"isNew\": False,\n \"hasCustomLabel\": False,\n \"label\": \"COUNT(name)\",\n \"optionName\": \"metric_xdzsijn42f9_khi4h3v3vci\",\n },\n {\n \"expressionType\": \"SIMPLE\",\n \"column\": {\n \"id\": 332,\n \"column_name\": \"ds\",\n \"verbose_name\": \"null\",\n \"description\": \"null\",\n \"expression\": \"\",\n \"filterable\": True,\n \"groupby\": True,\n \"is_dttm\": True,\n \"type\": \"TIMESTAMP WITHOUT TIME ZONE\",\n \"python_date_format\": \"null\",\n },\n \"aggregate\": \"COUNT\",\n \"sqlExpression\": \"null\",\n \"isNew\": False,\n \"hasCustomLabel\": False,\n \"label\": \"COUNT(ds)\",\n \"optionName\": \"metric_80g1qb9b6o7_ci5vquydcbe\",\n },\n ],\n \"order_desc\": True,\n \"adhoc_filters\": [],\n \"groupby\": [\"name\"],\n \"columns\": [],\n \"row_limit\": 10,\n \"color_scheme\": \"supersetColors\",\n \"label_colors\": {},\n \"show_legend\": True,\n \"y_axis_format\": \"SMART_NUMBER\",\n \"bottom_margin\": \"auto\",\n \"x_ticks_layout\": \"auto\",\n }\n\n self.login(username=\"admin\")\n rv = self.client.post(\n \"/superset/explore_json/\", data={\"form_data\": json.dumps(form_data)},\n )\n data = json.loads(rv.data.decode(\"utf-8\"))\n\n resp = self.run_sql(\n \"\"\"\n SELECT count(name) AS count_name, count(ds) AS count_ds\n FROM birth_names\n WHERE ds >= '1921-01-22 00:00:00.000000' AND ds < '2021-01-22 00:00:00.000000'\n GROUP BY name\n ORDER BY count_name DESC\n 
LIMIT 10;\n \"\"\",\n client_id=\"client_id_1\",\n user_name=\"admin\",\n )\n count_ds = []\n count_name = []\n for series in data[\"data\"]:\n if series[\"key\"] == \"COUNT(ds)\":\n count_ds = series[\"values\"]\n if series[\"key\"] == \"COUNT(name)\":\n count_name = series[\"values\"]\n for expected, actual_ds, actual_name in zip(resp[\"data\"], count_ds, count_name):\n assert expected[\"count_name\"] == actual_name[\"y\"]\n assert expected[\"count_ds\"] == actual_ds[\"y\"]\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n @mock.patch.dict(\n \"superset.extensions.feature_flag_manager._feature_flags\",\n GLOBAL_ASYNC_QUERIES=True,\n )\n def test_explore_json_async(self):\n tbl_id = self.table_ids.get(\"birth_names\")\n form_data = {\n \"datasource\": f\"{tbl_id}__table\",\n \"viz_type\": \"dist_bar\",\n \"time_range_endpoints\": [\"inclusive\", \"exclusive\"],\n \"granularity_sqla\": \"ds\",\n \"time_range\": \"No filter\",\n \"metrics\": [\"count\"],\n \"adhoc_filters\": [],\n \"groupby\": [\"gender\"],\n \"row_limit\": 100,\n }\n async_query_manager.init_app(app)\n self.login(username=\"admin\")\n rv = self.client.post(\n \"/superset/explore_json/\", data={\"form_data\": json.dumps(form_data)},\n )\n data = json.loads(rv.data.decode(\"utf-8\"))\n keys = list(data.keys())\n\n self.assertEqual(rv.status_code, 202)\n self.assertCountEqual(\n keys, [\"channel_id\", \"job_id\", \"user_id\", \"status\", \"errors\", \"result_url\"]\n )\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n @mock.patch.dict(\n \"superset.extensions.feature_flag_manager._feature_flags\",\n GLOBAL_ASYNC_QUERIES=True,\n )\n def test_explore_json_async_results_format(self):\n tbl_id = self.table_ids.get(\"birth_names\")\n form_data = {\n \"datasource\": f\"{tbl_id}__table\",\n \"viz_type\": \"dist_bar\",\n \"time_range_endpoints\": [\"inclusive\", \"exclusive\"],\n \"granularity_sqla\": \"ds\",\n \"time_range\": \"No filter\",\n \"metrics\": [\"count\"],\n \"adhoc_filters\": [],\n \"groupby\": [\"gender\"],\n \"row_limit\": 100,\n }\n async_query_manager.init_app(app)\n self.login(username=\"admin\")\n rv = self.client.post(\n \"/superset/explore_json/?results=true\",\n data={\"form_data\": json.dumps(form_data)},\n )\n self.assertEqual(rv.status_code, 200)\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n @mock.patch(\n \"superset.utils.cache_manager.CacheManager.cache\",\n new_callable=mock.PropertyMock,\n )\n @mock.patch(\"superset.viz.BaseViz.force_cached\", new_callable=mock.PropertyMock)\n def test_explore_json_data(self, mock_force_cached, mock_cache):\n tbl_id = self.table_ids.get(\"birth_names\")\n form_data = dict(\n {\n \"form_data\": {\n \"datasource\": f\"{tbl_id}__table\",\n \"viz_type\": \"dist_bar\",\n \"time_range_endpoints\": [\"inclusive\", \"exclusive\"],\n \"granularity_sqla\": \"ds\",\n \"time_range\": \"No filter\",\n \"metrics\": [\"count\"],\n \"adhoc_filters\": [],\n \"groupby\": [\"gender\"],\n \"row_limit\": 100,\n }\n }\n )\n\n class MockCache:\n def get(self, key):\n return form_data\n\n def set(self):\n return None\n\n mock_cache.return_value = MockCache()\n mock_force_cached.return_value = False\n\n self.login(username=\"admin\")\n rv = self.client.get(\"/superset/explore_json/data/valid-cache-key\")\n data = json.loads(rv.data.decode(\"utf-8\"))\n\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(data[\"rowcount\"], 2)\n\n @mock.patch(\n \"superset.utils.cache_manager.CacheManager.cache\",\n 
new_callable=mock.PropertyMock,\n )\n def test_explore_json_data_no_login(self, mock_cache):\n tbl_id = self.table_ids.get(\"birth_names\")\n form_data = dict(\n {\n \"form_data\": {\n \"datasource\": f\"{tbl_id}__table\",\n \"viz_type\": \"dist_bar\",\n \"time_range_endpoints\": [\"inclusive\", \"exclusive\"],\n \"granularity_sqla\": \"ds\",\n \"time_range\": \"No filter\",\n \"metrics\": [\"count\"],\n \"adhoc_filters\": [],\n \"groupby\": [\"gender\"],\n \"row_limit\": 100,\n }\n }\n )\n\n class MockCache:\n def get(self, key):\n return form_data\n\n def set(self):\n return None\n\n mock_cache.return_value = MockCache()\n\n rv = self.client.get(\"/superset/explore_json/data/valid-cache-key\")\n self.assertEqual(rv.status_code, 401)\n\n def test_explore_json_data_invalid_cache_key(self):\n self.login(username=\"admin\")\n cache_key = \"invalid-cache-key\"\n rv = self.client.get(f\"/superset/explore_json/data/{cache_key}\")\n data = json.loads(rv.data.decode(\"utf-8\"))\n\n self.assertEqual(rv.status_code, 404)\n self.assertEqual(data[\"error\"], \"Cached data not found\")\n\n @mock.patch(\n \"superset.security.SupersetSecurityManager.get_schemas_accessible_by_user\"\n )\n @mock.patch(\"superset.security.SupersetSecurityManager.can_access_database\")\n @mock.patch(\"superset.security.SupersetSecurityManager.can_access_all_datasources\")\n def test_schemas_access_for_csv_upload_endpoint(\n self,\n mock_can_access_all_datasources,\n mock_can_access_database,\n mock_schemas_accessible,\n ):\n self.login(username=\"admin\")\n dbobj = self.create_fake_db()\n mock_can_access_all_datasources.return_value = False\n mock_can_access_database.return_value = False\n mock_schemas_accessible.return_value = [\"this_schema_is_allowed_too\"]\n data = self.get_json_resp(\n url=\"/superset/schemas_access_for_file_upload?db_id={db_id}\".format(\n db_id=dbobj.id\n )\n )\n assert data == [\"this_schema_is_allowed_too\"]\n self.delete_fake_db()\n\n @pytest.mark.usefixtures(\"load_birth_names_dashboard_with_slices\")\n def test_select_star(self):\n self.login(username=\"admin\")\n examples_db = superset.utils.database.get_example_database()\n resp = self.get_resp(f\"/superset/select_star/{examples_db.id}/birth_names\")\n self.assertIn(\"gender\", resp)\n\n def test_get_select_star_not_allowed(self):\n \"\"\"\n Database API: Test get select star not allowed\n \"\"\"\n self.login(username=\"gamma\")\n example_db = superset.utils.database.get_example_database()\n resp = self.client.get(f\"/superset/select_star/{example_db.id}/birth_names\")\n self.assertEqual(resp.status_code, 403)\n\n @mock.patch(\"superset.views.core.results_backend_use_msgpack\", False)\n @mock.patch(\"superset.views.core.results_backend\")\n def test_display_limit(self, mock_results_backend):\n self.login()\n\n data = [{\"col_0\": i} for i in range(100)]\n payload = {\n \"status\": QueryStatus.SUCCESS,\n \"query\": {\"rows\": 100},\n \"data\": data,\n }\n # limit results to 1\n expected_key = {\"status\": \"success\", \"query\": {\"rows\": 100}, \"data\": data}\n limited_data = data[:1]\n expected_limited = {\n \"status\": \"success\",\n \"query\": {\"rows\": 100},\n \"data\": limited_data,\n \"displayLimitReached\": True,\n }\n\n query_mock = mock.Mock()\n query_mock.sql = \"SELECT *\"\n query_mock.database = 1\n query_mock.schema = \"superset\"\n\n # do not apply msgpack serialization\n use_msgpack = app.config[\"RESULTS_BACKEND_USE_MSGPACK\"]\n app.config[\"RESULTS_BACKEND_USE_MSGPACK\"] = False\n serialized_payload = 
sql_lab._serialize_payload(payload, False)\n compressed = utils.zlib_compress(serialized_payload)\n mock_results_backend.get.return_value = compressed\n\n with mock.patch(\"superset.views.core.db\") as mock_superset_db:\n mock_superset_db.session.query().filter_by().one_or_none.return_value = (\n query_mock\n )\n # get all results\n result_key = json.loads(self.get_resp(\"/superset/results/key/\"))\n result_limited = json.loads(self.get_resp(\"/superset/results/key/?rows=1\"))\n\n self.assertEqual(result_key, expected_key)\n self.assertEqual(result_limited, expected_limited)\n\n app.config[\"RESULTS_BACKEND_USE_MSGPACK\"] = use_msgpack\n\n def test_results_default_deserialization(self):\n use_new_deserialization = False\n data = [(\"a\", 4, 4.0, \"2019-08-18T16:39:16.660000\")]\n cursor_descr = (\n (\"a\", \"string\"),\n (\"b\", \"int\"),\n (\"c\", \"float\"),\n (\"d\", \"datetime\"),\n )\n db_engine_spec = BaseEngineSpec()\n results = SupersetResultSet(data, cursor_descr, db_engine_spec)\n query = {\n \"database_id\": 1,\n \"sql\": \"SELECT * FROM birth_names LIMIT 100\",\n \"status\": QueryStatus.PENDING,\n }\n (\n serialized_data,\n selected_columns,\n all_columns,\n expanded_columns,\n ) = sql_lab._serialize_and_expand_data(\n results, db_engine_spec, use_new_deserialization\n )\n payload = {\n \"query_id\": 1,\n \"status\": QueryStatus.SUCCESS,\n \"state\": QueryStatus.SUCCESS,\n \"data\": serialized_data,\n \"columns\": all_columns,\n \"selected_columns\": selected_columns,\n \"expanded_columns\": expanded_columns,\n \"query\": query,\n }\n\n serialized_payload = sql_lab._serialize_payload(\n payload, use_new_deserialization\n )\n self.assertIsInstance(serialized_payload, str)\n\n query_mock = mock.Mock()\n deserialized_payload = superset.views.utils._deserialize_results_payload(\n serialized_payload, query_mock, use_new_deserialization\n )\n\n self.assertDictEqual(deserialized_payload, payload)\n query_mock.assert_not_called()\n\n def test_results_msgpack_deserialization(self):\n use_new_deserialization = True\n data = [(\"a\", 4, 4.0, \"2019-08-18T16:39:16.660000\")]\n cursor_descr = (\n (\"a\", \"string\"),\n (\"b\", \"int\"),\n (\"c\", \"float\"),\n (\"d\", \"datetime\"),\n )\n db_engine_spec = BaseEngineSpec()\n results = SupersetResultSet(data, cursor_descr, db_engine_spec)\n query = {\n \"database_id\": 1,\n \"sql\": \"SELECT * FROM birth_names LIMIT 100\",\n \"status\": QueryStatus.PENDING,\n }\n (\n serialized_data,\n selected_columns,\n all_columns,\n expanded_columns,\n ) = sql_lab._serialize_and_expand_data(\n results, db_engine_spec, use_new_deserialization\n )\n payload = {\n \"query_id\": 1,\n \"status\": QueryStatus.SUCCESS,\n \"state\": QueryStatus.SUCCESS,\n \"data\": serialized_data,\n \"columns\": all_columns,\n \"selected_columns\": selected_columns,\n \"expanded_columns\": expanded_columns,\n \"query\": query,\n }\n\n serialized_payload = sql_lab._serialize_payload(\n payload, use_new_deserialization\n )\n self.assertIsInstance(serialized_payload, bytes)\n\n with mock.patch.object(\n db_engine_spec, \"expand_data\", wraps=db_engine_spec.expand_data\n ) as expand_data:\n query_mock = mock.Mock()\n query_mock.database.db_engine_spec.expand_data = expand_data\n\n deserialized_payload = superset.views.utils._deserialize_results_payload(\n serialized_payload, query_mock, use_new_deserialization\n )\n df = results.to_pandas_df()\n payload[\"data\"] = dataframe.df_to_records(df)\n\n self.assertDictEqual(deserialized_payload, payload)\n 
expand_data.assert_called_once()\n\n @mock.patch.dict(\n \"superset.extensions.feature_flag_manager._feature_flags\",\n {\"FOO\": lambda x: 1},\n clear=True,\n )\n @pytest.mark.usefixtures(\"load_world_bank_dashboard_with_slices\")\n def test_feature_flag_serialization(self):\n \"\"\"\n Functions in feature flags don't break bootstrap data serialization.\n \"\"\"\n self.login()\n\n encoded = json.dumps(\n {\"FOO\": lambda x: 1, \"super\": \"set\"},\n default=utils.pessimistic_json_iso_dttm_ser,\n )\n html_string = (\n html.escape(encoded, quote=False)\n .replace(\"'\", \"&#39;\")\n .replace('\"', \"&#34;\")\n )\n dash_id = db.session.query(Dashboard.id).first()[0]\n tbl_id = self.table_ids.get(\"wb_health_population\")\n urls = [\n \"/superset/sqllab\",\n \"/superset/welcome\",\n f\"/superset/dashboard/{dash_id}/\",\n \"/superset/profile/admin/\",\n f\"/superset/explore/table/{tbl_id}\",\n ]\n for url in urls:\n data = self.get_resp(url)\n self.assertTrue(html_string in data)\n\n @mock.patch.dict(\n \"superset.extensions.feature_flag_manager._feature_flags\",\n {\"SQLLAB_BACKEND_PERSISTENCE\": True},\n clear=True,\n )\n def test_sqllab_backend_persistence_payload(self):\n username = \"admin\"\n self.login(username)\n user_id = security_manager.find_user(username).id\n\n # create a tab\n data = {\n \"queryEditor\": json.dumps(\n {\n \"title\": \"Untitled Query 1\",\n \"dbId\": 1,\n \"schema\": None,\n \"autorun\": False,\n \"sql\": \"SELECT ...\",\n \"queryLimit\": 1000,\n }\n )\n }\n resp = self.get_json_resp(\"/tabstateview/\", data=data)\n tab_state_id = resp[\"id\"]\n\n # run a query in the created tab\n self.run_sql(\n \"SELECT name FROM birth_names\",\n \"client_id_1\",\n user_name=username,\n raise_on_error=True,\n sql_editor_id=str(tab_state_id),\n )\n # run an orphan query (no tab)\n self.run_sql(\n \"SELECT name FROM birth_names\",\n \"client_id_2\",\n user_name=username,\n raise_on_error=True,\n )\n\n # we should have only 1 query returned, since the second one is not\n # associated with any tabs\n payload = views.Superset._get_sqllab_tabs(user_id=user_id)\n self.assertEqual(len(payload[\"queries\"]), 1)\n\n def test_virtual_table_explore_visibility(self):\n # test that default visibility it set to True\n database = superset.utils.database.get_example_database()\n self.assertEqual(database.allows_virtual_table_explore, True)\n\n # test that visibility is disabled when extra is set to False\n extra = database.get_extra()\n extra[\"allows_virtual_table_explore\"] = False\n database.extra = json.dumps(extra)\n self.assertEqual(database.allows_virtual_table_explore, False)\n\n # test that visibility is enabled when extra is set to True\n extra = database.get_extra()\n extra[\"allows_virtual_table_explore\"] = True\n database.extra = json.dumps(extra)\n self.assertEqual(database.allows_virtual_table_explore, True)\n\n # test that visibility is not broken with bad values\n extra = database.get_extra()\n extra[\"allows_virtual_table_explore\"] = \"trash value\"\n database.extra = json.dumps(extra)\n self.assertEqual(database.allows_virtual_table_explore, True)\n\n def test_explore_database_id(self):\n database = superset.utils.database.get_example_database()\n explore_database = superset.utils.database.get_example_database()\n\n # test that explore_database_id is the regular database\n # id if none is set in the extra\n self.assertEqual(database.explore_database_id, database.id)\n\n # test that explore_database_id is correct if the extra is set\n extra = database.get_extra()\n 
extra[\"explore_database_id\"] = explore_database.id\n database.extra = json.dumps(extra)\n self.assertEqual(database.explore_database_id, explore_database.id)\n\n def test_get_column_names_from_metric(self):\n simple_metric = {\n \"expressionType\": utils.AdhocMetricExpressionType.SIMPLE.value,\n \"column\": {\"column_name\": \"my_col\"},\n \"aggregate\": \"SUM\",\n \"label\": \"My Simple Label\",\n }\n assert utils.get_column_name_from_metric(simple_metric) == \"my_col\"\n\n sql_metric = {\n \"expressionType\": utils.AdhocMetricExpressionType.SQL.value,\n \"sqlExpression\": \"SUM(my_label)\",\n \"label\": \"My SQL Label\",\n }\n assert utils.get_column_name_from_metric(sql_metric) is None\n assert utils.get_column_names_from_metrics([simple_metric, sql_metric]) == [\n \"my_col\"\n ]\n\n @pytest.mark.usefixtures(\"load_world_bank_dashboard_with_slices\")\n @mock.patch(\"superset.models.core.DB_CONNECTION_MUTATOR\")\n def test_explore_injected_exceptions(self, mock_db_connection_mutator):\n \"\"\"\n Handle injected exceptions from the db mutator\n \"\"\"\n # Assert we can handle a custom exception at the mutator level\n exception = SupersetException(\"Error message\")\n mock_db_connection_mutator.side_effect = exception\n slice = db.session.query(Slice).first()\n url = f\"/superset/explore/?form_data=%7B%22slice_id%22%3A%20{slice.id}%7D\"\n\n self.login()\n data = self.get_resp(url)\n self.assertIn(\"Error message\", data)\n\n # Assert we can handle a driver exception at the mutator level\n exception = SQLAlchemyError(\"Error message\")\n mock_db_connection_mutator.side_effect = exception\n slice = db.session.query(Slice).first()\n url = f\"/superset/explore/?form_data=%7B%22slice_id%22%3A%20{slice.id}%7D\"\n\n self.login()\n data = self.get_resp(url)\n self.assertIn(\"Error message\", data)\n\n @pytest.mark.usefixtures(\"load_world_bank_dashboard_with_slices\")\n @mock.patch(\"superset.models.core.DB_CONNECTION_MUTATOR\")\n def test_dashboard_injected_exceptions(self, mock_db_connection_mutator):\n \"\"\"\n Handle injected exceptions from the db mutator\n \"\"\"\n\n # Assert we can handle a custom excetion at the mutator level\n exception = SupersetException(\"Error message\")\n mock_db_connection_mutator.side_effect = exception\n dash = db.session.query(Dashboard).first()\n url = f\"/superset/dashboard/{dash.id}/\"\n\n self.login()\n data = self.get_resp(url)\n self.assertIn(\"Error message\", data)\n\n # Assert we can handle a driver exception at the mutator level\n exception = SQLAlchemyError(\"Error message\")\n mock_db_connection_mutator.side_effect = exception\n dash = db.session.query(Dashboard).first()\n url = f\"/superset/dashboard/{dash.id}/\"\n\n self.login()\n data = self.get_resp(url)\n self.assertIn(\"Error message\", data)\n\n @mock.patch(\"superset.sql_lab.cancel_query\")\n @mock.patch(\"superset.views.core.db.session\")\n def test_stop_query_not_implemented(\n self, mock_superset_db_session, mock_sql_lab_cancel_query\n ):\n \"\"\"\n Handles stop query when the DB engine spec does not\n have a cancel query method.\n \"\"\"\n form_data = {\"client_id\": \"foo\"}\n query_mock = mock.Mock()\n query_mock.client_id = \"foo\"\n query_mock.status = QueryStatus.RUNNING\n self.login(username=\"admin\")\n mock_superset_db_session.query().filter_by().one().return_value = query_mock\n mock_sql_lab_cancel_query.return_value = False\n rv = self.client.post(\n \"/superset/stop_query/\", data={\"form_data\": json.dumps(form_data)},\n )\n\n assert rv.status_code == 422\n\n\nif __name__ == 
\"__main__\":\n unittest.main()\n" ]
[ [ "pandas.Timestamp", "pandas.DataFrame" ] ]
zierenberg/machine_learning_muca
[ "6fcca12ccda7680ea4cb0e1f10bb53a68b6b0a02" ]
[ "2019_noe_deep_boltzmann_tfv2/deep_boltzmann/networks/noninvertible.py" ]
[ "import keras\nimport tensorflow as tf\nimport numpy as np\n\nfrom deep_boltzmann.networks import nonlinear_transform\nfrom deep_boltzmann.networks import connect as _connect\n\nclass NormalTransformer(object):\n\n def __init__(self, mu_layers, sigma_layers):\n self.mu_layers = mu_layers\n self.sigma_layers = sigma_layers\n\n def _compute_x1(self, mu, log_sigma, w1):\n return mu + tf.exp(log_sigma) * w1\n\n def _compute_log_p1(self, mu, log_sigma, x1):\n return -tf.reduce_sum(input_tensor=log_sigma, axis=1) - 0.5 * tf.reduce_sum(input_tensor=((x1 - mu)/(tf.exp(log_sigma)))**2, axis=1)\n\n def connect(self, x0, w1):\n # evaluate mu and sigma\n mu = _connect(x0, self.mu_layers)\n log_sigma = _connect(x0, self.sigma_layers)\n # transform x\n #x1 = mu + sigma * w0\n self.x1 = keras.layers.Lambda(lambda args: self._compute_x1(args[0], args[1], args[2]))([mu, log_sigma, w1])\n # compute density\n #log_p1 = -tf.reduce_sum(sigma, axis=0) - 0.5 * tf.reduce_sum((self.x1 - mu)/sigma, axis=0)\n self.log_p1 = keras.layers.Lambda(lambda args: self._compute_log_p1(args[0], args[1], args[2]))([mu, log_sigma, self.x1])\n # return variable and density\n return self.x1, self.log_p1\n\nclass NormalResidualTransformer(object):\n\n def __init__(self, mu_layers, sigma_layers):\n self.mu_layers = mu_layers\n self.sigma_layers = sigma_layers\n\n def _compute_x1(self, x0, mu, log_sigma, w1):\n return x0 + mu + tf.exp(log_sigma) * w1\n\n def _compute_log_p1(self, x0, mu, log_sigma, x1):\n return -tf.reduce_sum(input_tensor=log_sigma, axis=1) - 0.5 * tf.reduce_sum(input_tensor=((x1 - x0 - mu)/(tf.exp(log_sigma)))**2, axis=1)\n\n def connect(self, x0, w1):\n # evaluate mu and sigma\n mu = _connect(x0, self.mu_layers)\n log_sigma = _connect(x0, self.sigma_layers)\n # transform x\n #x1 = mu + sigma * w0\n self.x1 = keras.layers.Lambda(lambda args: self._compute_x1(args[0], args[1], args[2], args[3]))([x0, mu, log_sigma, w1])\n # compute density\n #log_p1 = -tf.reduce_sum(sigma, axis=0) - 0.5 * tf.reduce_sum((self.x1 - mu)/sigma, axis=0)\n self.log_p1 = keras.layers.Lambda(lambda args: self._compute_log_p1(args[0], args[1], args[2], args[3]))([x0, mu, log_sigma, self.x1])\n # return variable and density\n return self.x1, self.log_p1\n\nclass NoninvNet(object):\n def __init__(self, dim, layers):\n self.dim = dim\n self.layers = layers\n self.log_p_total = None\n\n def connect(self):\n # x0 = 0\n self.x0 = keras.layers.Input(shape=(self.dim,)) # current noise input\n x_last = self.x0\n\n self.xs = []\n self.ws = []\n self.log_ps = []\n for layer in self.layers:\n # noise input\n w = keras.layers.Input(shape=(self.dim,)) # current noise input\n self.ws.append(w)\n # compute x and probability\n x, log_p = layer.connect(x_last, w)\n self.xs.append(x) # new state\n self.log_ps.append(log_p) # conditional generation probability\n # update x_last\n x_last = x\n # output\n self.x_out = self.xs[-1]\n # total probability\n self.log_p_total = keras.layers.Lambda(lambda arg: tf.reduce_sum(input_tensor=arg, axis=0))(self.log_ps)\n\n\n def log_probability(self):\n \"\"\" Computes the total log probability of the current sample\"\"\"\n return tf.reduce_sum(input_tensor=self.log_ps, axis=0)\n\n\ndef normal_transnet(dim, nlayers, mu_shape=(100, 100), mu_activation='relu',\n sigma_shape=(100, 100), sigma_activation='tanh', residual=False,\n **layer_args):\n \"\"\"\n dim : int\n Dimension of variables\n nlayers : int\n Number of layers in the transformer\n mu_shape : int\n Number of hidden units in each nonlinear layer\n mu_activation : 
str\n Hidden-neuron activation functions used in the nonlinear layers\n sigma_shape : int\n Number of hidden units in each nonlinear layer\n sigma_activation : str\n Hidden-neuron activation functions used in the nonlinear layers\n\n \"\"\"\n layers = []\n for l in range(nlayers):\n mu_net = nonlinear_transform(dim, nlayers=len(mu_shape)+1, nhidden=mu_shape,\n activation=mu_activation, **layer_args)\n sigma_net = nonlinear_transform(dim, nlayers=len(sigma_shape)+1, nhidden=sigma_shape,\n activation=sigma_activation, init_outputs=0, **layer_args)\n if residual:\n layer = NormalResidualTransformer(mu_net, sigma_net)\n else:\n layer = NormalTransformer(mu_net, sigma_net)\n layers.append(layer)\n ninvnet = NoninvNet(dim, layers)\n ninvnet.connect()\n return ninvnet\n" ]
[ [ "tensorflow.reduce_sum", "tensorflow.exp" ] ]
CKPalk/SeattleCrime_DM
[ "0bfbf597ef7c4e87a4030e1c03f62b2f4c9f3c5b" ]
[ "DataMining/Stats/coord_bounds.py" ]
[ "''' Work of Cameron Palk '''\n\nimport sys\nimport pandas as pd\n\ndef main( argv ):\n\ttry:\n\t\tcsv_filepath \t= argv[ 0 ]\n\t\toutput_filepath = argv[ 1 ]\n\texcept IndexError:\n\t\tprint( \"Error, usage: \\\"python3 coord_bounds.py <CSV> <output_file>\\\"\" ) \n\t\treturn\n\t\n\ttraining_data = pd.read_csv( csv_filepath )\n\n\ttraining_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude\n\ttraining_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude\n\n\ttraining_data.dropna()\n\n\tprint( training_data[ 'clean_Latitude' ] )\n\n\tfor axis in [ 'clean_Longitude', 'clean_Latitude' ]:\n\t\tprint( \"{:16} min: {:16} max: {:16}\".format( \n\t\t\taxis,\n\t\t\tmin( training_data[ axis ] ), \n\t\t\tmax( training_data[ axis ] ) \n\t\t) )\n\n\t#\n\nif __name__=='__main__':\n\tmain( sys.argv[ 1: ] )\n" ]
[ [ "pandas.read_csv" ] ]
KhalilBryant/PlasmaPy
[ "05f7cb60348c7048fb3b8fbaf25985f2fba47fb7" ]
[ "plasmapy/utils/roman/tests/test_roman.py" ]
[ "import pytest\nimport numpy as np\nimport plasmapy.utils.roman as roman\nfrom plasmapy.utils.pytest_helpers import run_test\n\n\nints_and_roman_numerals = [\n (1, \"I\"),\n (2, \"II\"),\n (3, \"III\"),\n (4, \"IV\"),\n (5, \"V\"),\n (6, \"VI\"),\n (7, \"VII\"),\n (8, \"VIII\"),\n (9, \"IX\"),\n (10, \"X\"),\n (11, \"XI\"),\n (12, \"XII\"),\n (13, \"XIII\"),\n (14, \"XIV\"),\n (15, \"XV\"),\n (16, \"XVI\"),\n (17, \"XVII\"),\n (18, \"XVIII\"),\n (19, \"XIX\"),\n (20, \"XX\"),\n (21, \"XXI\"),\n (22, \"XXII\"),\n (23, \"XXIII\"),\n (24, \"XXIV\"),\n (25, \"XXV\"),\n (26, \"XXVI\"),\n (27, \"XXVII\"),\n (28, \"XXVIII\"),\n (29, \"XXIX\"),\n (30, \"XXX\"),\n (31, \"XXXI\"),\n (32, \"XXXII\"),\n (33, \"XXXIII\"),\n (34, \"XXXIV\"),\n (35, \"XXXV\"),\n (36, \"XXXVI\"),\n (37, \"XXXVII\"),\n (38, \"XXXVIII\"),\n (39, \"XXXIX\"),\n (40, \"XL\"),\n (41, \"XLI\"),\n (42, \"XLII\"),\n (43, \"XLIII\"),\n (44, \"XLIV\"),\n (45, \"XLV\"),\n (46, \"XLVI\"),\n (47, \"XLVII\"),\n (48, \"XLVIII\"),\n (49, \"XLIX\"),\n (50, \"L\"),\n (51, \"LI\"),\n (52, \"LII\"),\n (53, \"LIII\"),\n (54, \"LIV\"),\n (55, \"LV\"),\n (56, \"LVI\"),\n (57, \"LVII\"),\n (58, \"LVIII\"),\n (59, \"LIX\"),\n (60, \"LX\"),\n (61, \"LXI\"),\n (62, \"LXII\"),\n (63, \"LXIII\"),\n (64, \"LXIV\"),\n (65, \"LXV\"),\n (66, \"LXVI\"),\n (67, \"LXVII\"),\n (68, \"LXVIII\"),\n (69, \"LXIX\"),\n (70, \"LXX\"),\n (71, \"LXXI\"),\n (72, \"LXXII\"),\n (73, \"LXXIII\"),\n (74, \"LXXIV\"),\n (75, \"LXXV\"),\n (76, \"LXXVI\"),\n (77, \"LXXVII\"),\n (78, \"LXXVIII\"),\n (79, \"LXXIX\"),\n (80, \"LXXX\"),\n (81, \"LXXXI\"),\n (82, \"LXXXII\"),\n (83, \"LXXXIII\"),\n (84, \"LXXXIV\"),\n (85, \"LXXXV\"),\n (86, \"LXXXVI\"),\n (87, \"LXXXVII\"),\n (88, \"LXXXVIII\"),\n (89, \"LXXXIX\"),\n (90, \"XC\"),\n (91, \"XCI\"),\n (92, \"XCII\"),\n (93, \"XCIII\"),\n (94, \"XCIV\"),\n (95, \"XCV\"),\n (96, \"XCVI\"),\n (97, \"XCVII\"),\n (98, \"XCVIII\"),\n (99, \"XCIX\"),\n (100, \"C\"),\n (101, \"CI\"),\n (102, \"CII\"),\n (103, \"CIII\"),\n (104, \"CIV\"),\n (105, \"CV\"),\n (106, \"CVI\"),\n (107, \"CVII\"),\n (108, \"CVIII\"),\n (109, \"CIX\"),\n (110, \"CX\"),\n (111, \"CXI\"),\n (112, \"CXII\"),\n (113, \"CXIII\"),\n (114, \"CXIV\"),\n (115, \"CXV\"),\n (116, \"CXVI\"),\n (117, \"CXVII\"),\n (118, \"CXVIII\"),\n (119, \"CXIX\"),\n (120, \"CXX\"),\n (121, \"CXXI\"),\n (122, \"CXXII\"),\n (188, \"CLXXXVIII\"),\n (189, \"CLXXXIX\"),\n (198, \"CXCVIII\"),\n (199, \"CXCIX\"),\n (200, \"CC\"),\n (np.int(9), \"IX\"),\n (np.int16(10), \"X\"),\n (np.int32(11), \"XI\"),\n (np.int64(14), \"XIV\"),\n]\n\ntoRoman_exceptions_table = [\n (\"X\", TypeError),\n (-1, roman.OutOfRangeError),\n (0, roman.OutOfRangeError),\n (5000, roman.OutOfRangeError),\n]\n\nfromRoman_exceptions_table = [\n (\"asdfasd\", roman.InvalidRomanNumeralError),\n (1, TypeError),\n (\"xi\", roman.InvalidRomanNumeralError),\n]\n\n\[email protected](\"integer, roman_numeral\", ints_and_roman_numerals)\ndef test_to_roman(integer, roman_numeral):\n \"\"\"\n Test that `~plasmapy.utils.roman.to_roman` correctly converts\n integers to Roman numerals.\n \"\"\"\n run_test(func=roman.to_roman, args=integer, expected_outcome=roman_numeral)\n\n\[email protected](\"integer, roman_numeral\", ints_and_roman_numerals)\ndef test_from_roman(integer, roman_numeral):\n \"\"\"\n Test that `~plasmapy.utils.roman.from_roman` correctly converts\n Roman numerals to integers.\n \"\"\"\n run_test(func=roman.from_roman, args=roman_numeral, expected_outcome=int(integer))\n\n\[email protected](\"input, expected_exception\", 
toRoman_exceptions_table)\ndef test_to_roman_exceptions(input, expected_exception):\n \"\"\"\n Test that `~plasmapy.utils.roman.to_roman` raises the correct\n exceptions when necessary.\n \"\"\"\n run_test(func=roman.to_roman, args=input, expected_outcome=expected_exception)\n\n\[email protected](\"input, expected_exception\", fromRoman_exceptions_table)\ndef test_from_roman_exceptions(input, expected_exception):\n \"\"\"\n Test that `~plasmapy.utils.roman.from_roman` raises the correct\n exceptions when necessary.\n \"\"\"\n run_test(func=roman.from_roman, args=input, expected_outcome=expected_exception)\n\n\ntest_is_roman_numeral_table = [\n (\"I\", True),\n (\"i\", False),\n (\"CLXXXVIII\", True),\n (1, TypeError),\n (\"khjfda\", False),\n (\"VIIII\", False),\n (\"IXX\", False),\n ((\"I\", \"II\"), TypeError),\n]\n\n\[email protected](\"input, expected\", test_is_roman_numeral_table)\ndef test_is_roman_numeral(input, expected):\n run_test(func=roman.is_roman_numeral, args=input, expected_outcome=expected)\n" ]
[ [ "numpy.int", "numpy.int32", "numpy.int64", "numpy.int16" ] ]
yanzhoupan/dlrm_ssm
[ "49ca1e4487ff0e148065c0a133acb078835a9b86" ]
[ "tricks/lsh_pp_pretaining.py" ]
[ "# data preprocessing for LSH embedding\nimport numpy as np\nimport torch\nfrom min_hash_generator import SparseBitVectorMinHashGenerator\nfrom collections import defaultdict\n# import multiprocessing\nfrom tqdm import tqdm\nimport time\nimport random\nimport concurrent.futures\nimport pdb\n\nseed = 123\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n\n# use partial data set to get minhash table.\nmin_hash_gen = None\nval_indices = None\n\nimport sys\n\nif len(sys.argv) <=1:\n print(\"Usage: <script> embedding hash num_pt\")\n assert(False)\nEMBEDDING = int(sys.argv[1])\nNUM_HASH = int(sys.argv[2])\nNUM_PT = int(sys.argv[3])\nprint(\"EMB:\",EMBEDDING, \"NUMH\",NUM_HASH, \"NUM_PT\",NUM_PT)\n\ndef compute(start, end):\n global min_hash_table\n p_min_hash_table = np.zeros((end-start, EMBEDDING))\n for val_id in range(start, end):\n p_min_hash_table[val_id-start] = min_hash_gen.generate(val_indices[val_id])\n\n return start,end ,p_min_hash_table\n\ndef getBigMinHashTable():\n global min_hash_gen, min_hash_table, val_indices\n data = np.load('./input/kaggleAdDisplayChallenge_processed.npz')\n data_num, cat_num = data[\"X_cat\"].shape # (45840617, 26) for criteo\n partial_idx = np.random.choice(np.arange(data_num), size=NUM_PT, replace=False)\n partial_cat_data = data['X_cat'][partial_idx]\n print(partial_cat_data.shape)\n\n start_time = time.time()\n np.savez(r'./cat_counts.npz', cat_counts = data['counts'])\n\n base = 0\n val_indices = defaultdict(lambda:[])\n # generate signiture matrix for category values (partial data)\n for fea_id in tqdm(range(cat_num)):\n cat_fea = partial_cat_data[:, fea_id]\n \n for doc_id in range(len(cat_fea)): # loop over docs\n val_indices[cat_fea[doc_id] + base].append(doc_id)\n\n for val in range(data['counts'][fea_id]):\n if val_indices[val+base] == []: \n val_indices[val+base] = [random.randint(0, data_num+1)] # set val_indices to a random place if never seen it\n base += data['counts'][fea_id]\n \n embedding_dim = EMBEDDING\n min_hash_table = np.zeros((len(val_indices), embedding_dim))\n input_size = len(cat_fea) # number of the data items\n min_hash_gen = SparseBitVectorMinHashGenerator(input_size, embedding_dim, NUM_HASH)\n\n batch_size=1000\n with concurrent.futures.ProcessPoolExecutor(50) as executor:\n print(\"submitting jobs\")\n futures = []\n print (\"total\", len(val_indices))\n total = len(val_indices)\n num_batches = int(np.ceil(len(val_indices) / batch_size))\n for i in tqdm(range(num_batches)):\n start = i * batch_size\n end = min(total, start + batch_size)\n if end > start:\n futures.append(executor.submit(compute, start, end))\n #compute(start, end)\n ip = 0\n for res in tqdm(concurrent.futures.as_completed(futures), total = num_batches):\n st,ed,output = res.result()\n ip = ip + 1\n min_hash_table[st:ed,:] = output\n #print(st, ed, np.sum(min_hash_table[st:ed]))\n np.savez(r'./input/bigMinHashTable_H'+ str(NUM_HASH) + '_E' + str(EMBEDDING)+ '_P' + str(NUM_PT) + '.npz', big_min_hash_table = min_hash_table.astype(int))\n\n end_time = time.time()\n print(end_time - start_time)\n\n\nif __name__ == \"__main__\":\n # getMinHashTable()\n getBigMinHashTable()\n # bigMinHashTable = np.load('./input/bigMinHashTable.npz')\n # minHashTables = np.load('./input/minHashTables.npz')\n # print(len(minHashTables['arr_0'][:, 0]))\n # print(len(bigMinHashTable['big_min_hash_table'][:, 0]))\n" ]
[ [ "numpy.savez", "numpy.random.seed", "torch.manual_seed", "numpy.arange", "numpy.load", "numpy.zeros" ] ]
pulkit1joshi/SimGNN
[ "199b6014482a1dc8719394de4fc17f03c1b7192c" ]
[ "src/simgnn.py" ]
[ "from tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras_gcn import GraphConv\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom custom_layers import Attention, NeuralTensorLayer\n\"\"\" \nMain model : Node-to-Node interaction not implemented.\nFunctional API :\nShared layers are shared_gcn1, shared_gcn2, shard_gcn3, shared_attention\n\"\"\"\ndef simgnn(parser):\n inputA = Input(shape=(None,16))\n GinputA = Input(shape=(None,None))\n inputB = Input(shape=(None,16))\n GinputB = Input(shape=(None,None))\n \n shared_gcn1 = GraphConv(units=parser.filters_1,step_num=3, activation=\"relu\")\n shared_gcn2 = GraphConv(units=parser.filters_2,step_num=3, activation=\"relu\")\n shared_gcn3 = GraphConv(units=parser.filters_3,step_num=3, activation=\"relu\")\n shared_attention = Attention(parser)\n\n x = shared_gcn1([inputA, GinputA])\n x = shared_gcn2([x, GinputA])\n x = shared_gcn3([x, GinputA])\n x = shared_attention(x[0])\n\n y = shared_gcn1([inputB, GinputB])\n y = shared_gcn2([y, GinputB])\n y = shared_gcn3([y, GinputB])\n y = shared_attention(y[0])\n\n z = NeuralTensorLayer(output_dim=16, input_dim=16)([x, y])\n z = keras.layers.Dense(16, activation=\"relu\")(z)\n z = keras.layers.Dense(8, activation=\"relu\")(z)\n z = keras.layers.Dense(4, activation=\"relu\")(z)\n z = keras.layers.Dense(1)(z)\n z = keras.activations.sigmoid(z)\n\n return Model(inputs=[inputA, GinputA, inputB, GinputB], outputs=z)" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.keras.activations.sigmoid" ] ]
iejMac/TTTArena
[ "056636f064769c3251fb2448e7487b4fa8394733" ]
[ "random/agent.py" ]
[ "from agent import Agent\r\nfrom numpy.random import randint\r\n\r\nclass RandomAgent(Agent):\r\n def __init__(self, name):\r\n super().__init__(name)\r\n\r\n def make_action(self, state):\r\n movex = randint(0, state.shape[1])\r\n movey = randint(0, state.shape[0])\r\n return (movey, movex)\r\n\r\n\r\n" ]
[ [ "numpy.random.randint" ] ]
AsianHam/geoclaw
[ "b5f9ee8cd6e64d107ba8bba1e6d588aa7bf6d417" ]
[ "examples/tsunami/eta_init_force_dry/setrun.py" ]
[ "\"\"\"\nModule to set up run time parameters for Clawpack.\n\nThe values set in the function setrun are then written out to data files\nthat will be read in by the Fortran code.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport os, sys\nimport numpy as np\n\n\ntry:\n CLAW = os.environ['CLAW']\nexcept:\n raise Exception(\"*** Must first set CLAW enviornment variable\")\n\nfrom clawpack.geoclaw.data import ForceDry\nfrom clawpack.amrclaw.data import FlagRegion\n\n\n#------------------------------\ndef setrun(claw_pkg='geoclaw'):\n#------------------------------\n\n \"\"\"\n Define the parameters used for running Clawpack.\n\n INPUT:\n claw_pkg expected to be \"geoclaw\" for this setrun.\n\n OUTPUT:\n rundata - object of class ClawRunData\n\n \"\"\"\n\n from clawpack.clawutil import data\n\n assert claw_pkg.lower() == 'geoclaw', \"Expected claw_pkg = 'geoclaw'\"\n\n num_dim = 2\n rundata = data.ClawRunData(claw_pkg, num_dim)\n\n\n #------------------------------------------------------------------\n # Problem-specific parameters to be written to setprob.data:\n #------------------------------------------------------------------\n \n #probdata = rundata.new_UserData(name='probdata',fname='setprob.data')\n #probdata.add_param('variable_eta_init', True) # now in qinit info\n\n\n #------------------------------------------------------------------\n # Standard Clawpack parameters to be written to claw.data:\n # (or to amr2ez.data for AMR)\n #------------------------------------------------------------------\n clawdata = rundata.clawdata # initialized when rundata instantiated\n\n\n # Set single grid parameters first.\n # See below for AMR parameters.\n\n\n # ---------------\n # Spatial domain:\n # ---------------\n\n # Number of space dimensions:\n clawdata.num_dim = num_dim\n\n\n\n # Lower and upper edge of computational domain:\n # x values should be integer multipes of 1/3\"\n # y values should be integer multipes of 1/3\"\n # Note: always satisfied if limits are multiples of 0.01 degree\n\n arcsec16 = 1./(6*3600.)\n\n # choose domain and offset edges by half a 1/3\" cell so\n # cell centers are exactly at DEM grid points:\n\n clawdata.lower[0] = -1.9 - arcsec16 # west longitude\n clawdata.upper[0] = 0.1 - arcsec16 # east longitude\n\n clawdata.lower[1] = -1.9 - arcsec16 # south latitude\n clawdata.upper[1] = 1.9 - arcsec16 # north latitude\n\n # choose mx and my so coarsest grid has 2 minute resolution:\n clawdata.num_cells[0] = 60\n clawdata.num_cells[1] = 114\n\n\n # ---------------\n # Size of system:\n # ---------------\n\n # Number of equations in the system:\n clawdata.num_eqn = 3\n\n # Number of auxiliary variables in the aux array (initialized in setaux)\n clawdata.num_aux = 3\n\n # Index of aux array corresponding to capacity function, if there is one:\n clawdata.capa_index = 2\n\n \n \n # -------------\n # Initial time:\n # -------------\n\n clawdata.t0 = 0.0\n\n\n # Restart from checkpoint file of a previous run?\n # If restarting, t0 above should be from original run, and the\n # restart_file 'fort.chkNNNNN' specified below should be in \n # the OUTDIR indicated in Makefile.\n\n clawdata.restart = False # True to restart from prior results\n clawdata.restart_file = ''\n\n # -------------\n # Output times:\n #--------------\n\n # Specify at what times the results should be written to fort.q files.\n # Note that the time integration stops after the final output time.\n # The solution at initial time t0 is always written in 
addition.\n\n clawdata.output_style = 1\n\n if clawdata.output_style==1:\n # Output nout frames at equally spaced times up to tfinal:\n clawdata.num_output_times = 15\n clawdata.tfinal = 30*60.\n clawdata.output_t0 = True # output at initial (or restart) time?\n\n elif clawdata.output_style == 2:\n # Specify a list of output times.\n clawdata.output_times = [0.5, 1.0]\n\n elif clawdata.output_style == 3:\n # Output every iout timesteps with a total of ntot time steps:\n clawdata.output_step_interval = 1\n clawdata.total_steps = 20\n clawdata.output_t0 = True\n \n\n clawdata.output_format = 'binary'\n\n clawdata.output_q_components = 'all' # need all\n clawdata.output_aux_components = 'none' # eta=h+B is in q\n clawdata.output_aux_onlyonce = False # output aux arrays each frame\n\n\n\n # ---------------------------------------------------\n # Verbosity of messages to screen during integration:\n # ---------------------------------------------------\n\n # The current t, dt, and cfl will be printed every time step\n # at AMR levels <= verbosity. Set verbosity = 0 for no printing.\n # (E.g. verbosity == 2 means print only on levels 1 and 2.)\n clawdata.verbosity = 1\n\n\n\n # --------------\n # Time stepping:\n # --------------\n\n # if dt_variable==1: variable time steps used based on cfl_desired,\n # if dt_variable==0: fixed time steps dt = dt_initial will always be used.\n clawdata.dt_variable = True\n\n # Initial time step for variable dt.\n # If dt_variable==0 then dt=dt_initial for all steps:\n clawdata.dt_initial = 0.2\n\n # Max time step to be allowed if variable dt used:\n clawdata.dt_max = 1e+99\n\n # Desired Courant number if variable dt used, and max to allow without\n # retaking step with a smaller dt:\n clawdata.cfl_desired = 0.8 \n clawdata.cfl_max = 1.0\n\n # Maximum number of time steps to allow between output times:\n clawdata.steps_max = 5000\n\n\n\n\n # ------------------\n # Method to be used:\n # ------------------\n\n # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters\n clawdata.order = 2\n \n # Use dimensional splitting? 
(not yet available for AMR)\n clawdata.dimensional_split = 'unsplit'\n \n # For unsplit method, transverse_waves can be \n # 0 or 'none' ==> donor cell (only normal solver used)\n # 1 or 'increment' ==> corner transport of waves\n # 2 or 'all' ==> corner transport of 2nd order corrections too\n clawdata.transverse_waves = 2\n\n # Number of waves in the Riemann solution:\n clawdata.num_waves = 3\n \n # List of limiters to use for each wave family: \n # Required: len(limiter) == num_waves\n # Some options:\n # 0 or 'none' ==> no limiter (Lax-Wendroff)\n # 1 or 'minmod' ==> minmod\n # 2 or 'superbee' ==> superbee\n # 3 or 'mc' ==> MC limiter\n # 4 or 'vanleer' ==> van Leer\n clawdata.limiter = ['mc', 'mc', 'mc']\n\n clawdata.use_fwaves = True # True ==> use f-wave version of algorithms\n \n # Source terms splitting:\n # src_split == 0 or 'none' ==> no source term (src routine never called)\n # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used, \n # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.\n clawdata.source_split = 'godunov'\n\n\n # --------------------\n # Boundary conditions:\n # --------------------\n\n # Number of ghost cells (usually 2)\n clawdata.num_ghost = 2\n\n # Choice of BCs at xlower and xupper:\n # 0 => user specified (must modify bcN.f to use this option)\n # 1 => extrapolation (non-reflecting outflow)\n # 2 => periodic (must specify this at both boundaries)\n # 3 => solid wall for systems where q(2) is normal velocity\n\n clawdata.bc_lower[0] = 'extrap'\n clawdata.bc_upper[0] = 'extrap'\n\n clawdata.bc_lower[1] = 'extrap'\n clawdata.bc_upper[1] = 'extrap'\n\n\n\n # --------------\n # Checkpointing:\n # --------------\n\n # Specify when checkpoint files should be created that can be\n # used to restart a computation.\n\n # negative checkpoint_style means alternate between aaaaa and bbbbb files\n # so that at most 2 checkpoint files exist at any time, useful when\n # doing frequent checkpoints of large problems.\n\n clawdata.checkpt_style = 0\n\n if clawdata.checkpt_style == 0:\n # Do not checkpoint at all\n pass\n\n elif clawdata.checkpt_style == 1:\n # Checkpoint only at tfinal.\n pass\n\n elif abs(clawdata.checkpt_style) == 2:\n # Specify a list of checkpoint times. 
\n clawdata.checkpt_times = 3600.*np.arange(1,16,1)\n\n elif abs(clawdata.checkpt_style) == 3:\n # Checkpoint every checkpt_interval timesteps (on Level 1)\n # and at the final time.\n clawdata.checkpt_interval = 5\n\n\n # ---------------\n # AMR parameters:\n # ---------------\n amrdata = rundata.amrdata\n\n # max number of refinement levels:\n amrdata.amr_levels_max = 4\n\n # List of refinement ratios at each level (length at least mxnest-1)\n # dx = dy = 2', 10\", 2\", 1/3\":\n amrdata.refinement_ratios_x = [12,5,6]\n amrdata.refinement_ratios_y = [12,5,6]\n amrdata.refinement_ratios_t = [12,5,6]\n\n\n\n # Specify type of each aux variable in amrdata.auxtype.\n # This must be a list of length maux, each element of which is one of:\n # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).\n\n amrdata.aux_type = ['center','capacity','yleft']\n\n\n # Flag using refinement routine flag2refine rather than richardson error\n amrdata.flag_richardson = False # use Richardson?\n amrdata.flag2refine = True\n\n # steps to take on each level L between regriddings of level L+1:\n amrdata.regrid_interval = 3\n\n # width of buffer zone around flagged points:\n # (typically the same as regrid_interval so waves don't escape):\n amrdata.regrid_buffer_width = 2\n\n # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)\n # (closer to 1.0 => more small grids may be needed to cover flagged cells)\n amrdata.clustering_cutoff = 0.700000\n\n # print info about each regridding up to this level:\n amrdata.verbosity_regrid = 1 \n\n\n # ---------------\n # Regions:\n # ---------------\n #rundata.regiondata.regions = []\n # to specify regions of refinement append lines of the form\n # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]\n # NO OLD STYLE REGIONS USED HERE\n\n\n # ---------------\n # NEW flagregions\n # ---------------\n\n\n flagregions = rundata.flagregiondata.flagregions # initialized to []\n\n # now append as many flagregions as desired to this list:\n\n # The entire domain restricted to level 1 for illustration:\n # Note that this is a rectangle specified in the new way:\n # (other regions below will force/allow more refinement)\n flagregion = FlagRegion(num_dim=2)\n flagregion.name = 'Region_domain'\n flagregion.minlevel = 1\n flagregion.maxlevel = 1\n flagregion.t1 = 0.\n flagregion.t2 = 1e9\n flagregion.spatial_region_type = 1 # Rectangle\n # domain plus a bit so kml files look nicer:\n flagregion.spatial_region = [clawdata.lower[0] - 0.1,\n clawdata.upper[0] + 0.1,\n clawdata.lower[1] - 0.1,\n clawdata.upper[1] + 0.1]\n flagregions.append(flagregion)\n\n # force 2 levels around dtopo source region for short time:\n flagregion = FlagRegion(num_dim=2)\n flagregion.name = 'Region_level2_dtopo'\n flagregion.minlevel = 2\n flagregion.maxlevel = 2\n flagregion.t1 = 0.\n flagregion.t2 = 2.\n flagregion.spatial_region_type = 1 # Rectangle\n flagregion.spatial_region = [-2,1,-1,1]\n flagregions.append(flagregion)\n\n # allow 3 levels around coastal region for all times:\n flagregion = FlagRegion(num_dim=2)\n flagregion.name = 'Region_level3'\n flagregion.minlevel = 1\n flagregion.maxlevel = 3\n flagregion.t1 = 0.\n flagregion.t2 = 1e9\n flagregion.spatial_region_type = 1 # Rectangle\n flagregion.spatial_region = [-0.01,0.01,-0.01,0.01]\n flagregions.append(flagregion)\n\n # force 4 levels around coastal region starting at 5 minutes:\n flagregion = FlagRegion(num_dim=2)\n flagregion.name = 'Region_level4'\n flagregion.minlevel = 4\n flagregion.maxlevel = 4\n flagregion.t1 = 5*60.\n 
flagregion.t2 = 1e9\n flagregion.spatial_region_type = 1 # Rectangle\n flagregion.spatial_region = [-0.005, 0.01, -0.011, 0.011]\n flagregions.append(flagregion)\n\n # ---------------\n # Gauges:\n # ---------------\n # for gauges append lines of the form [gaugeno, x, y, t1, t2]\n rundata.gaugedata.gauges = []\n\n\n # Set GeoClaw specific runtime parameters.\n\n try:\n geo_data = rundata.geo_data\n except:\n print(\"*** Error, this rundata has no geo_data attribute\")\n raise AttributeError(\"Missing geo_data attribute\")\n \n # == Physics ==\n geo_data.gravity = 9.81\n geo_data.coordinate_system = 2\n geo_data.earth_radius = 6367.5e3\n\n # == Forcing Options\n geo_data.coriolis_forcing = False\n\n # == Algorithm and Initial Conditions ==\n geo_data.sea_level = 0.0\n geo_data.dry_tolerance = 1.e-3\n geo_data.friction_forcing = True\n geo_data.manning_coefficient =.025\n geo_data.friction_depth = 1e6\n\n # Refinement settings\n refinement_data = rundata.refinement_data\n refinement_data.variable_dt_refinement_ratios = True\n refinement_data.wave_tolerance = 0.2\n\n # == settopo.data values ==\n topofiles = rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, fname]\n topodir = 'input_files'\n\n topofiles.append([3, topodir + '/topo_ocean.tt3'])\n topofiles.append([3, topodir + '/topo_shore.tt3'])\n\n\n # == setdtopo.data values ==\n dtopo_data = rundata.dtopo_data\n # for moving topography, append lines of the form : (<= 1 allowed for now!)\n # [topotype, fname]\n dtopodir = 'input_files'\n dtopo_data.dtopofiles.append([3, dtopodir + '/dtopo_test.tt3'])\n\n dtopo_data.dt_max_dtopo = 1.0\n\n\n # == setqinit.data values ==\n \n rundata.qinit_data.qinit_type = 0\n rundata.qinit_data.qinitfiles = []\n # for qinit perturbations, append lines of the form: (<= 1 allowed for now!)\n # [fname]\n \n # NEW feature to adjust sea level by dtopo:\n rundata.qinit_data.variable_eta_init = True\n \n # NEW feature to force dry land some locations below sea level:\n force_dry = ForceDry()\n force_dry.tend = 7*60.\n force_dry.fname = 'input_files/force_dry_init.tt3'\n rundata.qinit_data.force_dry_list.append(force_dry)\n\n # == fgmax.data values ==\n #fgmax_files = rundata.fgmax_data.fgmax_files\n # for fixed grids append to this list names of any fgmax input files\n\n\n # ----- For developers ----- \n # Toggle debugging print statements:\n amrdata.dprint = False # print domain flags\n amrdata.eprint = False # print err est flags\n amrdata.edebug = False # even more err est flags\n amrdata.gprint = False # grid bisection/clustering\n amrdata.nprint = False # proper nesting output\n amrdata.pprint = False # proj. of tagged points\n amrdata.rprint = False # print regridding summary\n amrdata.sprint = False # space/memory output\n amrdata.tprint = False # time step reporting each level\n amrdata.uprint = False # update/upbnd reporting\n \n # More AMR parameters can be set -- see the defaults in pyclaw/data.py\n\n return rundata\n # end of function setrun\n # ----------------------\n\n\n\nif __name__ == '__main__':\n # Set up run-time parameters and write all data files.\n import sys\n rundata = setrun(*sys.argv[1:])\n rundata.write()\n \n\n" ]
[ [ "numpy.arange" ] ]
Tobias-Fischer/dreyeve
[ "d73979d738e706d90a8aa9d696c6e4dcb19c1134", "d73979d738e706d90a8aa9d696c6e4dcb19c1134" ]
[ "experiments/actions/action_utils.py", "experiments/train/utils.py" ]
[ "\"\"\"\nUtilities for improve code readability in `predict_actions_with_SVM.py`\n\"\"\"\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom os.path import join, exists\n\n\nclass DreyeveRun:\n \"\"\"\n Single run of the DR(eye)VE dataset.\n \"\"\"\n\n def __init__(self, dataset_data_root, num_run):\n self.num_run = num_run\n self.file_course = join(dataset_data_root, '{:02d}'.format(self.num_run), 'speed_course_coord.txt')\n self.file_steering = join(dataset_data_root, '{:02d}'.format(self.num_run), 'steering_directions.txt')\n self.file_actions = join(dataset_data_root, '{:02d}'.format(self.num_run), 'actions.csv')\n\n\nclass DreyeveDataset:\n \"\"\"\n Class that models the Dreyeve dataset\n \"\"\"\n\n def __init__(self, dataset_root):\n self.dataset_data_root = join(dataset_root, 'DATA')\n self.dataset_pred_root = join(dataset_root, 'PREDICTIONS_2017')\n\n self.train_runs = [DreyeveRun(self.dataset_data_root, r) for r in range(0 + 1, 38)]\n self.test_runs = [DreyeveRun(self.dataset_data_root, r) for r in range(38, 74 + 1)]\n\n self.frames_each_run = 7500\n self.num_train_frames = len(self.train_runs) * self.frames_each_run\n self.num_test_frames = len(self.test_runs) * self.frames_each_run\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n", "import numpy as np\n\n# cityscapes dataset palette\npalette = np.array([[128, 64, 128],\n [244, 35, 232],\n [70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [0, 0, 142],\n [0, 0, 70],\n [0, 60, 100],\n [0, 80, 100],\n [0, 0, 230],\n [119, 11, 32]], dtype='uint8')\n\n\ndef seg_to_colormap(seg, channels_first):\n \"\"\"\n Function to turn segmentation PREDICTION (not probabilities) to colormap.\n\n :param seg: the prediction image, having shape (h,w)\n :param channels_first: if true, returns (c,h,w) rather than (h,w,c)\n :return: the colormap image, having shape (h,w,3)\n \"\"\"\n h, w = seg.shape\n color_image = palette[seg.ravel()].reshape(h, w, 3)\n\n if channels_first:\n color_image = color_image.transpose(2, 0, 1)\n\n return color_image\n\n\ndef read_lines_from_file(filename):\n \"\"\"\n Function to read lines from file\n\n :param filename: The text file to be read.\n :return: content: A list of strings\n \"\"\"\n with open(filename) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content]\n return content\n\n\ndef get_branch_from_experiment_id(experiment_id):\n \"\"\"\n Function to return model branch name given experiment_id.\n 
:param experiment_id: experiment id\n :return: a string among ['all','image','optical_flow','semseg']\n \"\"\"\n\n assert isinstance(experiment_id, str), \"Experiment ID must be a string.\"\n\n branch = None\n if experiment_id.lower().startswith('dreyeve'):\n branch = \"all\"\n elif experiment_id.lower().startswith('color'):\n branch = \"image\"\n elif experiment_id.lower().startswith('flow'):\n branch = \"optical_flow\"\n elif experiment_id.lower().startswith('segm'):\n branch = \"semseg\"\n\n return branch\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ], [ "numpy.array" ] ]
strieb/VisualQuestionAnswering
[ "28f6ae1f2abd839145306a1d4f34ee84271cf3c1" ]
[ "vqa_image_preprocess.py" ]
[ "import json\nfrom collections import Counter\nimport re\nfrom VQA.PythonHelperTools.vqaTools.vqa import VQA\nimport random\nimport numpy as np\nfrom keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\nfrom matplotlib import pyplot as plt\nimport os\nimport VQAModel\nfrom keras.applications.xception import decode_predictions, preprocess_input\n# from keras.applications.inception_v3 import decode_predictions, preprocess_input\nfrom PIL import Image, ImageOps\nfrom matplotlib import pyplot as plt\nimport math\n\nfrom Environment import DATADIR\nversionType = 'v2_' # this should be '' when using VQA v2.0 dataset\ntaskType = 'OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0\ndataType = 'mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.\ndataSubType = 'train2014'\nsaveDir = 'preprocessed_xcep_24'\nannFile = '%s/Annotations/%s%s_%s_annotations.json' % (DATADIR, versionType, dataType, dataSubType)\nquesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (DATADIR, versionType, taskType, dataType, dataSubType)\nimgDir = '%s/Images/%s/' % (DATADIR, dataSubType)\n\ni = 0\ndirectory = os.fsencode(imgDir)\n\n# 363, 555\n# 427, 619\nsize1 = 299+64\nsize2 = 299+64\n\n\nmodel = VQAModel.createModelXception((size1, size2, 3))\nmodel.summary()\nfor file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(\".jpg\"):\n imgPath = os.path.join(imgDir, filename)\n id = int(filename[-16:-4])\n img = load_img(imgPath)\n width, height = img.size\n if(width >= height):\n img = img.resize((size2, size1), resample=Image.BICUBIC)\n img_array = img_to_array(img)\n img_array = preprocess_input(img_array)\n # img_array = np.tile(img,(32,1,1,1))\n img_array = np.expand_dims(img_array, axis=0)\n predictions = model.predict(img_array)\n pred = predictions[0].reshape(24,2048)\n np.save(imgDir+saveDir+\"/\"+str(id), pred)\n if i < 1000 and i%100 == 0:\n print(i)\n if i % 1000 == 0:\n print(i)\n i += 1\n\nmodel = VQAModel.createModelXception((size2, size1, 3))\nfor file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(\".jpg\"):\n imgPath = os.path.join(imgDir, filename)\n id = int(filename[-16:-4])\n img = load_img(imgPath)\n width, height = img.size\n if(width < height):\n img = img.resize((size1, size2), resample=Image.BICUBIC)\n img_array = img_to_array(img)\n img_array = preprocess_input(img_array)\n # img_array = np.tile(img,(32,1,1,1))\n img_array = np.expand_dims(img_array, axis=0)\n # plt.imshow((img_array[0] + 1)/2)\n # plt.show()\n predictions = model.predict(img_array)\n pred = predictions[0].reshape(24,2048)\n np.save(imgDir+saveDir+\"/\"+str(id), pred)\n if i % 1000 == 0:\n print(i)\n i += 1" ]
[ [ "numpy.expand_dims" ] ]
bakkerjarr/NetTrafClassificationExploration
[ "66febafcbe4820851784ae72c50a49c28fa91df4" ]
[ "initialExp/classifiers/iscx_naive_bayes.py" ]
[ "# Copyright 2016 Jarrod N. Bakker\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom numpy import float32 as np_float\n\nimport numpy.core.multiarray as np_array\nfrom sklearn.naive_bayes import GaussianNB\n\nimport iscx_result_calc as rc\n\n__author__ = \"Jarrod N. Bakker\"\n\n\nclass NaiveBayesCls:\n\n NAME = \"Naive_Bayes\"\n\n def __init__(self, data, labels, skf):\n \"\"\"Initialise.\n\n :param data: Data set for the classifier to use.\n :param labels: Labels indicating if a flow is normal or attack.\n :param skf: StratifiedKFold object representing what data set\n elements belong in each fold.\n \"\"\"\n self._data = data\n self._labels = labels\n self._kfold = skf\n self._classifier = GaussianNB()\n\n def classify(self):\n \"\"\"Classify DDoS flows using Naive Bayes.\n\n The data passed through to the fit() method cannot be a string\n type.\n\n :return: Results of the classification.\n \"\"\"\n all_results = [] # Results from all fold trials\n fold_num = 1\n for train, test in self._kfold:\n print(\"\\tTraining Naive Bayes...\")\n # NOTE: I have switched the training and testing set around.\n train_array = np_array.array(map(self._data.__getitem__,\n test)).astype(np_float)\n train_label_array = np_array.array(map(\n self._labels.__getitem__, test)).astype(np_float)\n self._classifier.fit(train_array, train_label_array)\n print(\"\\tTesting classifier...\")\n test_array = np_array.array(map(self._data.__getitem__,\n train)).astype(np_float)\n test_label_array = np_array.array(map(\n self._labels.__getitem__, train)).astype(np_float)\n test_size = len(train) # Remember the switch of sets!\n pred = self._classifier.predict(test_array)\n mislabeled = (test_label_array != pred).sum()\n tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)\n detection_rate = rc.detection_rate(tp, fn)\n false_pos_rate = rc.false_positive_rate(tn, fp)\n all_results.append([fold_num, tp, tn, fp, fn, detection_rate,\n false_pos_rate, mislabeled, test_size])\n fold_num += 1\n return all_results\n" ]
[ [ "sklearn.naive_bayes.GaussianNB" ] ]
arwhyte/SI664-scripts
[ "99daaac123ebdbfb0fbca59251f711efb9a7d39f" ]
[ "scripts/inspect_un_data_sets.py" ]
[ "import logging\nimport os\nimport pandas as pd\nimport sys as sys\n\n\ndef main(argv=None):\n\t\"\"\"\n\tUtilize Pandas library to read in both UNSD M49 country and area .csv file\n\t(tab delimited) as well as the UNESCO heritage site .csv file (tab delimited).\n\tExtract regions, sub-regions, intermediate regions, country and areas, and\n\tother column data. Filter out duplicate values and NaN values and sort the\n\tseries in alphabetical order. Write out each series to a .csv file for inspection.\n\t\"\"\"\n\tif argv is None:\n\t\targv = sys.argv\n\n\tmsg = [\n\t\t'Source file read {0}',\n\t\t'UNSD M49 regions written to file {0}',\n\t\t'UNSD M49 sub-regions written to file {0}',\n\t\t'UNSD M49 intermediate regions written to file {0}',\n\t\t'UNSD M49 countries and areas written to file {0}',\n\t\t'UNSD M49 development status written to file {0}',\n\t\t'UNESCO heritage site countries/areas written to file {0}',\n\t\t'UNESCO heritage site categories written to file {0}',\n\t\t'UNESCO heritage site regions written to file {0}',\n\t\t'UNESCO heritage site transboundary values written to file {0}'\n\t]\n\n\t# Setting logging format and default level\n\tlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)\n\n\t# Read in United Nations Statistical Division (UNSD) M49 Standard data set (tabbed separator)\n\tunsd_csv = './input/csv/un_area_country_codes-m49.csv'\n\tunsd_data_frame = read_csv(unsd_csv, '\\t')\n\tlogging.info(msg[0].format(os.path.abspath(unsd_csv)))\n\n\t# Write regions to a .csv file.\n\tunsd_region = extract_filtered_series(unsd_data_frame, 'region_name')\n\tunsd_region_csv = './output/unesco/unsd_region.csv'\n\twrite_series_to_csv(unsd_region, unsd_region_csv, '\\t', False)\n\tlogging.info(msg[1].format(os.path.abspath(unsd_region_csv)))\n\n\t# Write sub-regions to a .csv file.\n\tunsd_sub_region = extract_filtered_series(unsd_data_frame, 'sub_region_name')\n\tunsd_sub_region_csv = './output/unesco/unsd_sub_region.csv'\n\twrite_series_to_csv(unsd_sub_region, unsd_sub_region_csv, '\\t', False)\n\tlogging.info(msg[2].format(os.path.abspath(unsd_sub_region_csv)))\n\n\t# Write intermediate_regions to a .csv file.\n\tunsd_intermed_region = extract_filtered_series(unsd_data_frame, 'intermediate_region_name')\n\tunsd_intermed_region_csv = './output/unesco/unsd_intermed_region.csv'\n\twrite_series_to_csv(unsd_intermed_region, unsd_intermed_region_csv, '\\t', False)\n\tlogging.info(msg[3].format(os.path.abspath(unsd_intermed_region_csv)))\n\n\t# Write countries or areas to a .csv file.\n\tunsd_country_area = extract_filtered_series(unsd_data_frame, 'country_area_name')\n\tunsd_country_area_csv = './output/unesco/unsd_country_area.csv'\n\twrite_series_to_csv(unsd_country_area, unsd_country_area_csv, '\\t', False)\n\tlogging.info(msg[4].format(os.path.abspath(unsd_country_area_csv)))\n\n\t# Write development status to a .csv file.\n\tunsd_dev_status = extract_filtered_series(unsd_data_frame, 'country_area_development_status')\n\tunsd_dev_status_csv = './output/unesco/unsd_dev_status.csv'\n\twrite_series_to_csv(unsd_dev_status, unsd_dev_status_csv, '\\t', False)\n\tlogging.info(msg[5].format(os.path.abspath(unsd_dev_status_csv)))\n\n\t# Read UNESCO heritage sites data (tabbed separator)\n\tunesco_csv = './input/csv/unesco_heritage_sites.csv'\n\tunesco_data_frame = read_csv(unesco_csv, '\\t')\n\tlogging.info(msg[0].format(os.path.abspath(unesco_csv)))\n\n\t# Write UNESCO heritage site countries and areas to a .csv file\n\tunesco_country_area = 
extract_filtered_series(unesco_data_frame, 'country_area')\n\tunesco_country_area_csv = './output/unesco/unesco_heritage_site_country_area.csv'\n\twrite_series_to_csv(unesco_country_area, unesco_country_area_csv, '\\t', False)\n\tlogging.info(msg[6].format(os.path.abspath(unesco_country_area_csv)))\n\n\t# Write UNESCO heritage site categories to a .csv file\n\tunesco_site_category = extract_filtered_series(unesco_data_frame, 'category')\n\tunesco_site_category_csv = './output/unesco/unesco_heritage_site_category.csv'\n\twrite_series_to_csv(unesco_site_category, unesco_site_category_csv, '\\t', False)\n\tlogging.info(msg[7].format(os.path.abspath(unesco_site_category_csv)))\n\n\t# Write UNESCO heritage site regions to a .csv file\n\tunesco_region = extract_filtered_series(unesco_data_frame, 'region')\n\tunesco_region_csv = './output/unesco/unesco_heritage_site_region.csv'\n\twrite_series_to_csv(unesco_region, unesco_region_csv, '\\t', False)\n\tlogging.info(msg[8].format(os.path.abspath(unesco_region_csv)))\n\n\t# Write UNESCO heritage site transboundary values to a .csv file\n\tunesco_transboundary = extract_filtered_series(unesco_data_frame, 'transboundary')\n\tunesco_transboundary_csv = './output/unesco/unesco_heritage_site_transboundary.csv'\n\twrite_series_to_csv(unesco_transboundary, unesco_transboundary_csv, '\\t', False)\n\tlogging.info(msg[9].format(os.path.abspath(unesco_transboundary_csv)))\n\n\ndef extract_filtered_series(data_frame, column_name):\n\t\"\"\"\n\tReturns a filtered Panda Series one-dimensional ndarray from a targeted column.\n\tDuplicate values and NaN or blank values are dropped from the result set which is\n\treturned sorted (ascending).\n\t:param data_frame: Pandas DataFrame\n\t:param column_name: column name string\n\t:return: Panda Series one-dimensional ndarray\n\t\"\"\"\n\treturn data_frame[column_name].drop_duplicates().dropna().sort_values()\n\n\ndef read_csv(path, delimiter=','):\n\t\"\"\"\n\tUtilize Pandas to read in *.csv file.\n\t:param path: file path\n\t:param delimiter: field delimiter\n\t:return: Pandas DataFrame\n\t\"\"\"\n\treturn pd.read_csv(path, sep=delimiter, encoding='utf-8', engine='python')\n\n\ndef write_series_to_csv(series, path, delimiter=',', row_name=True):\n\t\"\"\"\n\tWrite Pandas DataFrame to a *.csv file.\n\t:param series: Pandas one dimensional ndarray\n\t:param path: file path\n\t:param delimiter: field delimiter\n\t:param row_name: include row name boolean\n\t\"\"\"\n\tseries.to_csv(path, sep=delimiter, index=row_name)\n\n\nif __name__ == '__main__':\n\tsys.exit(main())" ]
[ [ "pandas.read_csv" ] ]
mrzhu666/USCL
[ "8a4741046ef8f337b1e9439d1575db670a11355c" ]
[ "generateFileList.py" ]
[ "import cv2\nimport os\nimport pickle\nfrom numpy.core.fromnumeric import shape\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom typing import Tuple\nfrom collections import defaultdict\nfrom sklearn.model_selection import train_test_split\nfrom IgAModel66.setting import config\n\n\n# Add the file name list to the csv in result/\n\nfiles=os.listdir(config['server_path']+'IgAModel/test/M0/')\nfiles.extend(os.listdir(config['server_path']+'IgAModel/test/M1/') )\n\n\neval_All=pd.read_csv('result/eval_All_0.73.csv',header=0)\n\n\neval_All['file']=files\n\n\neval_All.to_csv('result/eval_All_0.73_file.csv',index=False)" ]
[ [ "pandas.read_csv" ] ]
shivam-kotwalia/KittiSeg
[ "ac93c2f0f83bf84f2ba0d645f819b2bbeeeaf58d" ]
[ "continue.py" ]
[ "\"\"\"\nTrains, evaluates and saves the KittiSeg model.\n\n-------------------------------------------------\n\nThe MIT License (MIT)\n\nCopyright (c) 2017 Marvin Teichmann\n\nMore details: https://github.com/MarvinTeichmann/KittiSeg/blob/master/LICENSE\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport commentjson\nimport logging\nimport os\nimport sys\n\nimport collections.abc\n\n\ndef dict_merge(dct, merge_dct):\n \"\"\" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of\n updating only top-level keys, dict_merge recurses down into dicts nested\n to an arbitrary depth, updating keys. The ``merge_dct`` is merged into\n ``dct``.\n :param dct: dict onto which the merge is executed\n :param merge_dct: dct merged into dct\n :return: None\n \"\"\"\n for k, v in merge_dct.items():\n if (k in dct and isinstance(dct[k], dict) and\n isinstance(merge_dct[k], collections.abc.Mapping)):\n dict_merge(dct[k], merge_dct[k])\n else:\n dct[k] = merge_dct[k]\n\n\n# configure logging\nif 'TV_IS_DEV' in os.environ and os.environ['TV_IS_DEV']:\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\nelse:\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\n\n# https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070\nimport numpy as np\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nsys.path.insert(1, 'incl')\n\nimport tensorvision.train as train\nimport tensorvision.utils as utils\n\nflags.DEFINE_string('name', None,\n 'Append a name Tag to run.')\n\nflags.DEFINE_string('project', None,\n 'Append a name Tag to run.')\n\nflags.DEFINE_string('logdir', None,\n 'File storing model parameters.')\n\nflags.DEFINE_string('mod', None,\n 'Modifier for model parameters.')\n\nif 'TV_SAVE' in os.environ and os.environ['TV_SAVE']:\n tf.app.flags.DEFINE_boolean(\n 'save', True, ('Whether to save the run. In case --nosave (default) '\n 'output will be saved to the folder TV_DIR_RUNS/debug, '\n 'hence it will get overwritten by further runs.'))\nelse:\n tf.app.flags.DEFINE_boolean(\n 'save', True, ('Whether to save the run. In case --nosave (default) '\n 'output will be saved to the folder TV_DIR_RUNS/debug '\n 'hence it will get overwritten by further runs.'))\n\n\ndef main(_):\n utils.set_gpus_to_use()\n\n try:\n import tensorvision.train\n import tensorflow_fcn.utils\n except ImportError:\n logging.error(\"Could not import the submodules.\")\n logging.error(\"Please execute:\"\n \"'git submodule update --init --recursive'\")\n exit(1)\n\n if tf.app.flags.FLAGS.logdir is None:\n logging.error(\"No logdir is given.\")\n logging.info(\"Usage: python train.py --logdir dir\")\n exit(1)\n\n logging.info(\"Continuing training...\")\n train.continue_training(tf.app.flags.FLAGS.logdir)\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.app.flags.DEFINE_boolean", "tensorflow.app.run" ] ]
scratchrealm/spikeinterface
[ "17cfcd6f0c30c9933c11e560daf750366e12a151" ]
[ "spikeinterface/sortingcomponents/template_matching.py" ]
[ "\"\"\"Sorting components: template matching.\"\"\"\n\nimport numpy as np\n\nimport scipy.spatial\n\nfrom tqdm import tqdm\nimport sklearn, scipy\nimport scipy\n\nfrom threadpoolctl import threadpool_limits\n\ntry:\n import numba\n from numba import jit, prange\n HAVE_NUMBA = True\nexcept ImportError:\n HAVE_NUMBA = False\n\n\nfrom spikeinterface.core import WaveformExtractor\nfrom spikeinterface.core.job_tools import ChunkRecordingExecutor\nfrom spikeinterface.toolkit import (get_noise_levels, get_template_channel_sparsity,\n get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks)\n\nfrom spikeinterface.sortingcomponents.peak_detection import detect_peak_locally_exclusive, detect_peaks_by_channel\n\nfrom sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d\nfrom sklearn.linear_model import orthogonal_mp_gram\n\npotrs, = scipy.linalg.get_lapack_funcs(('potrs',), dtype=np.float32)\n\nnrm2, = scipy.linalg.get_blas_funcs(('nrm2', ), dtype=np.float32)\n\nspike_dtype = [('sample_ind', 'int64'), ('channel_ind', 'int64'), ('cluster_ind', 'int64'),\n ('amplitude', 'float64'), ('segment_ind', 'int64')]\n\n\ndef find_spikes_from_templates(recording, method='naive', method_kwargs={}, extra_outputs=False,\n **job_kwargs):\n \"\"\"Find spike from a recording from given templates.\n\n Parameters\n ----------\n recording: RecordingExtractor\n The recording extractor object\n waveform_extractor: WaveformExtractor\n The waveform extractor\n method: str \n Which method to use ('naive' | 'tridesclous' | 'circus')\n method_kwargs: dict, optional\n Keyword arguments for the chosen method\n extra_outputs: bool\n If True then method_kwargs is also return\n job_kwargs: dict\n Parameters for ChunkRecordingExecutor\n\n Returns\n -------\n spikes: ndarray\n Spikes found from templates.\n method_kwargs: \n Optionaly returns for debug purpose.\n Notes\n -----\n Templates are represented as WaveformExtractor so statistics can be extracted.\n \"\"\"\n\n assert method in template_matching_methods\n \n method_class = template_matching_methods[method]\n \n # initialize\n method_kwargs = method_class.initialize_and_check_kwargs(recording, method_kwargs)\n \n # add \n method_kwargs['margin'] = method_class.get_margin(recording, method_kwargs)\n \n # serialiaze for worker\n method_kwargs_seralized = method_class.serialize_method_kwargs(method_kwargs)\n \n # and run\n func = _find_spikes_chunk\n init_func = _init_worker_find_spikes\n init_args = (recording.to_dict(), method, method_kwargs_seralized)\n processor = ChunkRecordingExecutor(recording, func, init_func, init_args,\n handle_returns=True, job_name=f'find spikes ({method})', **job_kwargs)\n spikes = processor.run()\n\n spikes = np.concatenate(spikes)\n \n if extra_outputs:\n return spikes, method_kwargs\n else:\n return spikes\n\n\ndef _init_worker_find_spikes(recording, method, method_kwargs):\n \"\"\"Initialize worker for finding spikes.\"\"\"\n\n if isinstance(recording, dict):\n from spikeinterface.core import load_extractor\n recording = load_extractor(recording)\n\n method_class = template_matching_methods[method]\n method_kwargs = method_class.unserialize_in_worker(method_kwargs)\n\n\n # create a local dict per worker\n worker_ctx = {}\n worker_ctx['recording'] = recording\n worker_ctx['method'] = method\n worker_ctx['method_kwargs'] = method_kwargs\n worker_ctx['function'] = method_class.main_function\n \n\n return worker_ctx\n\n\ndef _find_spikes_chunk(segment_index, start_frame, 
end_frame, worker_ctx):\n \"\"\"Find spikes from a chunk of data.\"\"\"\n\n # recover variables of the worker\n recording = worker_ctx['recording']\n method = worker_ctx['method']\n method_kwargs = worker_ctx['method_kwargs']\n margin = method_kwargs['margin']\n \n # load trace in memory given some margin\n recording_segment = recording._recording_segments[segment_index]\n traces, left_margin, right_margin = get_chunk_with_margin(recording_segment,\n start_frame, end_frame, None, margin, add_zeros=True)\n\n \n function = worker_ctx['function']\n \n with threadpool_limits(limits=1):\n spikes = function(traces, method_kwargs)\n \n # remove spikes in margin\n if margin > 0:\n keep = (spikes['sample_ind'] >= margin) & (spikes['sample_ind'] < (traces.shape[0] - margin))\n spikes = spikes[keep]\n\n spikes['sample_ind'] += (start_frame - margin)\n spikes['segment_ind'] = segment_index\n return spikes\n\n\n# generic class for template engine\nclass BaseTemplateMatchingEngine:\n default_params = {}\n \n @classmethod\n def initialize_and_check_kwargs(cls, recording, kwargs):\n \"\"\"This function runs before loops\"\"\"\n # need to be implemented in subclass\n raise NotImplementedError\n\n @classmethod\n def serialize_method_kwargs(cls, kwargs):\n \"\"\"This function serializes kwargs to distribute them to workers\"\"\"\n # need to be implemented in subclass\n raise NotImplementedError\n\n @classmethod\n def unserialize_in_worker(cls, recording, kwargs):\n \"\"\"This function unserializes kwargs in workers\"\"\"\n # need to be implemented in subclass\n raise NotImplementedError\n\n @classmethod\n def get_margin(cls, recording, kwargs):\n # need to be implemented in subclass\n raise NotImplementedError\n\n @classmethod\n def main_function(cls, traces, method_kwargs):\n \"\"\"This function returns the number of samples for the chunk margins\"\"\"\n # need to be implemented in subclass\n raise NotImplementedError\n\n \n\n##################\n# naive matching #\n##################\n\n\nclass NaiveMatching(BaseTemplateMatchingEngine):\n \"\"\"\n This is a naive template matching that does not resolve collision\n and does not take in account sparsity.\n It just minimizes the distance to templates for detected peaks.\n\n It is implemented for benchmarking against this low quality template matching.\n And also as an example how to deal with methods_kwargs, margin, intit, func, ...\n \"\"\"\n default_params = {\n 'waveform_extractor': None,\n 'peak_sign': 'neg',\n 'n_shifts': 10,\n 'detect_threshold': 5,\n 'noise_levels': None,\n 'local_radius_um': 100,\n 'random_chunk_kwargs': {},\n }\n \n\n @classmethod\n def initialize_and_check_kwargs(cls, recording, kwargs):\n d = cls.default_params.copy()\n d.update(kwargs)\n \n assert d['waveform_extractor'] is not None\n \n we = d['waveform_extractor']\n\n if d['noise_levels'] is None:\n d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])\n\n d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']\n\n channel_distance = get_channel_distances(recording)\n d['neighbours_mask'] = channel_distance < d['local_radius_um']\n\n d['nbefore'] = we.nbefore\n d['nafter'] = we.nafter \n\n return d\n \n @classmethod\n def get_margin(cls, recording, kwargs):\n margin = max(kwargs['nbefore'], kwargs['nafter'])\n return margin\n\n @classmethod\n def serialize_method_kwargs(cls, kwargs):\n kwargs = dict(kwargs)\n \n waveform_extractor = kwargs['waveform_extractor']\n kwargs['waveform_extractor'] = str(waveform_extractor.folder)\n \n return kwargs\n\n 
@classmethod\n def unserialize_in_worker(cls, kwargs):\n \n we = kwargs['waveform_extractor']\n if isinstance(we, str):\n we = WaveformExtractor.load_from_folder(we)\n kwargs['waveform_extractor'] = we\n \n templates = we.get_all_templates(mode='average')\n \n kwargs['templates'] = templates\n \n return kwargs\n\n @classmethod\n def main_function(cls, traces, method_kwargs):\n \n peak_sign = method_kwargs['peak_sign']\n abs_threholds = method_kwargs['abs_threholds']\n n_shifts = method_kwargs['n_shifts']\n neighbours_mask = method_kwargs['neighbours_mask']\n templates = method_kwargs['templates']\n \n nbefore = method_kwargs['nbefore']\n nafter = method_kwargs['nafter']\n \n margin = method_kwargs['margin']\n \n if margin > 0:\n peak_traces = traces[margin:-margin, :]\n else:\n peak_traces = traces\n peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(peak_traces, peak_sign, abs_threholds, n_shifts, neighbours_mask)\n peak_sample_ind += margin\n\n\n spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)\n spikes['sample_ind'] = peak_sample_ind\n spikes['channel_ind'] = peak_chan_ind # TODO need to put the channel from template\n \n # naively take the closest template\n for i in range(peak_sample_ind.size):\n i0 = peak_sample_ind[i] - nbefore\n i1 = peak_sample_ind[i] + nafter\n \n wf = traces[i0:i1, :]\n dist = np.sum(np.sum((templates - wf[None, : , :])**2, axis=1), axis=1)\n cluster_ind = np.argmin(dist)\n\n spikes['cluster_ind'][i] = cluster_ind\n spikes['amplitude'][i] = 0.\n\n return spikes\n\n\n######################\n# tridesclous peeler #\n######################\n\n\nclass TridesclousPeeler(BaseTemplateMatchingEngine):\n \"\"\"\n Template-matching ported from Tridesclous sorter.\n \n The idea of this peeler is pretty simple.\n 1. Find peaks\n 2. order by best amplitues\n 3. find nearest template\n 4. remove it from traces.\n 5. 
in the residual find peaks again\n \n This method is quite fast but don't give exelent results to resolve\n spike collision when templates have high similarity.\n \"\"\"\n default_params = {\n 'waveform_extractor': None,\n 'peak_sign': 'neg',\n 'peak_shift_ms': 0.2,\n 'detect_threshold': 5,\n 'noise_levels': None,\n 'local_radius_um': 100,\n 'num_closest' : 5,\n 'sample_shift': 3,\n 'ms_before': 0.8,\n 'ms_after': 1.2,\n 'num_peeler_loop': 2,\n 'num_template_try' : 1,\n }\n \n @classmethod\n def initialize_and_check_kwargs(cls, recording, kwargs):\n \n assert HAVE_NUMBA\n \n d = cls.default_params.copy()\n d.update(kwargs)\n\n assert isinstance(d['waveform_extractor'], WaveformExtractor)\n \n we = d['waveform_extractor']\n unit_ids = we.sorting.unit_ids\n channel_ids = we.recording.channel_ids\n \n sr = we.recording.get_sampling_frequency()\n\n\n # TODO load as sharedmem\n templates = we.get_all_templates(mode='average')\n d['templates'] = templates\n\n d['nbefore'] = we.nbefore\n d['nafter'] = we.nafter\n\n\n nbefore_short = int(d['ms_before'] * sr / 1000.)\n nafter_short = int(d['ms_before'] * sr / 1000.)\n assert nbefore_short <= we.nbefore\n assert nafter_short <= we.nafter\n d['nbefore_short'] = nbefore_short\n d['nafter_short'] = nafter_short\n s0 = (we.nbefore - nbefore_short)\n s1 = -(we.nafter - nafter_short)\n if s1 == 0:\n s1 = None\n templates_short = templates[:, slice(s0,s1), :].copy()\n d['templates_short'] = templates_short\n\n \n d['peak_shift'] = int(d['peak_shift_ms'] / 1000 * sr)\n\n if d['noise_levels'] is None:\n print('TridesclousPeeler : noise should be computed outside')\n d['noise_levels'] = get_noise_levels(recording)\n\n d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']\n \n channel_distance = get_channel_distances(recording)\n d['neighbours_mask'] = channel_distance < d['local_radius_um']\n \n #\n #~ template_sparsity_inds = get_template_channel_sparsity(we, method='radius',\n #~ peak_sign=d['peak_sign'], outputs='index', radius_um=d['local_radius_um'])\n template_sparsity_inds = get_template_channel_sparsity(we, method='threshold',\n peak_sign=d['peak_sign'], outputs='index', threshold=d['detect_threshold']) \n template_sparsity = np.zeros((unit_ids.size, channel_ids.size), dtype='bool')\n for unit_index, unit_id in enumerate(unit_ids):\n chan_inds = template_sparsity_inds[unit_id]\n template_sparsity[unit_index, chan_inds] = True\n \n d['template_sparsity'] = template_sparsity\n \n extremum_channel = get_template_extremum_channel(we, peak_sign=d['peak_sign'], outputs='index')\n # as numpy vector\n extremum_channel = np.array([extremum_channel[unit_id] for unit_id in unit_ids], dtype='int64')\n d['extremum_channel'] = extremum_channel\n \n channel_locations = we.recording.get_channel_locations()\n \n # TODO try it with real locaion\n unit_locations = channel_locations[extremum_channel]\n #~ print(unit_locations)\n \n # distance between units\n unit_distances = scipy.spatial.distance.cdist(unit_locations, unit_locations, metric='euclidean')\n \n # seach for closet units and unitary discriminant vector\n closest_units = []\n for unit_ind, unit_id in enumerate(unit_ids):\n order = np.argsort(unit_distances[unit_ind, :])\n closest_u = np.arange(unit_ids.size)[order].tolist()\n closest_u.remove(unit_ind)\n closest_u = np.array(closest_u[:d['num_closest']])\n\n # compute unitary discriminent vector\n chans, = np.nonzero(d['template_sparsity'][unit_ind, :])\n template_sparse = templates[unit_ind, :, :][:, chans]\n closest_vec = []\n # against N closets\n 
for u in closest_u:\n vec = (templates[u, :, :][:, chans] - template_sparse)\n vec /= np.sum(vec ** 2)\n closest_vec.append((u, vec))\n # against noise\n closest_vec.append((None, - template_sparse / np.sum(template_sparse ** 2)))\n \n closest_units.append(closest_vec)\n\n d['closest_units'] = closest_units\n \n # distance channel from unit\n distances = scipy.spatial.distance.cdist(channel_locations, unit_locations, metric='euclidean')\n near_cluster_mask = distances < d['local_radius_um']\n\n # nearby cluster for each channel\n possible_clusters_by_channel = []\n for channel_ind in range(distances.shape[0]):\n cluster_inds, = np.nonzero(near_cluster_mask[channel_ind, :])\n possible_clusters_by_channel.append(cluster_inds)\n \n d['possible_clusters_by_channel'] = possible_clusters_by_channel\n\n\n \n \n d['possible_shifts'] = np.arange(-d['sample_shift'], d['sample_shift'] +1, dtype='int64')\n\n return d \n\n @classmethod\n def serialize_method_kwargs(cls, kwargs):\n kwargs = dict(kwargs)\n \n # remove waveform_extractor\n kwargs.pop('waveform_extractor')\n return kwargs\n\n @classmethod\n def unserialize_in_worker(cls, kwargs):\n return kwargs\n\n @classmethod\n def get_margin(cls, recording, kwargs):\n margin = 2 * (kwargs['nbefore'] + kwargs['nafter'])\n return margin\n\n @classmethod\n def main_function(cls, traces, d):\n \n traces = traces.copy()\n \n all_spikes = []\n level = 0\n while True:\n spikes = _tdc_find_spikes(traces, d, level=level)\n keep = (spikes['cluster_ind'] >= 0)\n \n if not np.any(keep):\n break\n all_spikes.append(spikes[keep])\n \n level += 1\n \n if level == d['num_peeler_loop']:\n break\n \n if len(all_spikes) > 0:\n all_spikes = np.concatenate(all_spikes)\n order = np.argsort(all_spikes['sample_ind'])\n all_spikes = all_spikes[order]\n else:\n all_spikes = np.zeros(0, dtype=spike_dtype)\n\n return all_spikes\n\n\ndef _tdc_find_spikes(traces, d, level=0):\n peak_sign = d['peak_sign']\n templates = d['templates']\n templates_short = d['templates_short']\n margin = d['margin']\n possible_clusters_by_channel = d['possible_clusters_by_channel']\n \n \n peak_traces = traces[margin // 2:-margin // 2, :]\n peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(peak_traces, peak_sign,\n d['abs_threholds'], d['peak_shift'], d['neighbours_mask'])\n peak_sample_ind += margin // 2\n\n\n peak_amplitude = traces[peak_sample_ind, peak_chan_ind]\n order = np.argsort(np.abs(peak_amplitude))[::-1]\n peak_sample_ind = peak_sample_ind[order]\n peak_chan_ind = peak_chan_ind[order]\n\n spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)\n spikes['sample_ind'] = peak_sample_ind\n spikes['channel_ind'] = peak_chan_ind # TODO need to put the channel from template\n\n\n\n possible_shifts = d['possible_shifts']\n distances_shift = np.zeros(possible_shifts.size)\n\n for i in range(peak_sample_ind.size):\n sample_ind = peak_sample_ind[i]\n\n chan_ind = peak_chan_ind[i]\n possible_clusters = possible_clusters_by_channel[chan_ind]\n \n if possible_clusters.size > 0:\n #~ s0 = sample_ind - d['nbefore']\n #~ s1 = sample_ind + d['nafter']\n\n #~ wf = traces[s0:s1, :]\n\n s0 = sample_ind - d['nbefore_short']\n s1 = sample_ind + d['nafter_short']\n wf_short = traces[s0:s1, :]\n \n ## pure numpy with cluster spasity\n # distances = np.sum(np.sum((templates[possible_clusters, :, :] - wf[None, : , :])**2, axis=1), axis=1)\n\n ## pure numpy with cluster+channel spasity\n # union_channels, = np.nonzero(np.any(d['template_sparsity'][possible_clusters, :], axis=0))\n # distances = 
np.sum(np.sum((templates[possible_clusters][:, :, union_channels] - wf[: , union_channels][None, : :])**2, axis=1), axis=1)\n \n ## numba with cluster+channel spasity\n union_channels = np.any(d['template_sparsity'][possible_clusters, :], axis=0)\n # distances = numba_sparse_dist(wf, templates, union_channels, possible_clusters)\n distances = numba_sparse_dist(wf_short, templates_short, union_channels, possible_clusters)\n \n \n # DEBUG\n #~ ind = np.argmin(distances)\n #~ cluster_ind = possible_clusters[ind]\n \n for ind in np.argsort(distances)[:d['num_template_try']]:\n cluster_ind = possible_clusters[ind]\n\n chan_sparsity = d['template_sparsity'][cluster_ind, :]\n template_sparse = templates[cluster_ind, :, :][:, chan_sparsity]\n\n # find best shift\n \n ## pure numpy version\n # for s, shift in enumerate(possible_shifts):\n #  wf_shift = traces[s0 + shift: s1 + shift, chan_sparsity]\n #  distances_shift[s] = np.sum((template_sparse - wf_shift)**2)\n # ind_shift = np.argmin(distances_shift)\n # shift = possible_shifts[ind_shift]\n \n ## numba version\n numba_best_shift(traces, templates[cluster_ind, :, :], sample_ind, d['nbefore'], possible_shifts, distances_shift, chan_sparsity)\n ind_shift = np.argmin(distances_shift)\n shift = possible_shifts[ind_shift]\n\n sample_ind = sample_ind + shift\n s0 = sample_ind - d['nbefore']\n s1 = sample_ind + d['nafter']\n wf_sparse = traces[s0:s1, chan_sparsity]\n\n # accept or not\n\n centered = wf_sparse - template_sparse\n accepted = True\n for other_ind, other_vector in d['closest_units'][cluster_ind]:\n v = np.sum(centered * other_vector)\n if np.abs(v) >0.5:\n accepted = False\n break\n \n if accepted:\n #~ if ind != np.argsort(distances)[0]:\n #~ print('not first one', np.argsort(distances), ind)\n break\n\n if accepted:\n amplitude = 1.\n \n # remove template\n template = templates[cluster_ind, :, :]\n s0 = sample_ind - d['nbefore']\n s1 = sample_ind + d['nafter']\n traces[s0:s1, :] -= template * amplitude\n \n else:\n cluster_ind = -1\n amplitude = 0.\n \n else:\n cluster_ind = -1\n amplitude = 0.\n \n spikes['cluster_ind'][i] = cluster_ind\n spikes['amplitude'][i] =amplitude\n \n\n return spikes \n\n\n\nif HAVE_NUMBA:\n @jit(nopython=True)\n def numba_sparse_dist(wf, templates, union_channels, possible_clusters):\n \"\"\"\n numba implementation that compute distance from template with sparsity \n handle by two separate vectors\n \"\"\"\n total_cluster, width, num_chan = templates.shape\n num_cluster = possible_clusters.shape[0]\n distances = np.zeros((num_cluster,), dtype=np.float32)\n for i in prange(num_cluster):\n cluster_ind = possible_clusters[i]\n sum_dist = 0.\n for chan_ind in range(num_chan):\n if union_channels[chan_ind]:\n for s in range(width):\n v = wf[s, chan_ind]\n t = templates[cluster_ind, s, chan_ind]\n sum_dist += (v - t) ** 2\n distances[i] = sum_dist\n return distances\n\n @jit(nopython=True)\n def numba_best_shift(traces, template, sample_ind, nbefore, possible_shifts, distances_shift, chan_sparsity):\n \"\"\"\n numba implementation to compute several sample shift before template substraction\n \"\"\"\n width, num_chan = template.shape\n n_shift = possible_shifts.size\n for i in range(n_shift):\n shift = possible_shifts[i]\n sum_dist = 0.\n for chan_ind in range(num_chan):\n if chan_sparsity[chan_ind]:\n for s in range(width):\n v = traces[sample_ind - nbefore + s +shift, chan_ind]\n t = template[s, chan_ind]\n sum_dist += (v - t) ** 2\n distances_shift[i] = sum_dist\n \n return distances_shift\n 
\n\n\n\n#################\n# Circus peeler #\n#################\n\n# if HAVE_NUMBA:\n# @jit(nopython=True)\n# def fastconvolution(traces, templates, output):\n# nb_time, nb_channels = traces.shape\n# nb_templates, nb_samples, nb_channels = templates.shape\n\n# center = nb_samples // 2\n\n# for i in range(center, nb_time - center + 1):\n# offset_1 = i - center\n# for k in range(nb_templates):\n# for jj in range(nb_samples):\n# offset_2 = offset_1 + jj\n# for j in range(nb_channels):\n# output[k, offset_1] += (templates[k, jj, j] * traces[offset_2, j])\n# return output\n\n\nclass CircusOMPPeeler(BaseTemplateMatchingEngine):\n \"\"\"\n Orthogonal Matching Pursuit inspired from Spyking Circus sorter\n\n https://elifesciences.org/articles/34518\n \n This is an Orthogonal Template Matching algorithm. For speed and \n memory optimization, templates are automatically sparsified if the \n density of the matrix falls below a given threshold. Signal is\n convolved with the templates, and as long as some scalar products\n are higher than a given threshold, we use a Cholesky decomposition\n to compute the optimal amplitudes needed to reconstruct the signal.\n\n IMPORTANT NOTE: small chunks are more efficient for such Peeler,\n consider using 100ms chunk\n\n Parameters\n ----------\n noise_levels: array\n The noise levels, for every channels\n random_chunk_kwargs: dict\n Parameters for computing noise levels, if not provided (sub optimal)\n amplitude: tuple\n (Minimal, Maximal) amplitudes allowed for every template\n omp_min_sps: float\n Stopping criteria of the OMP algorithm, in percentage of the norm\n sparsify_threshold: float\n Templates are sparsified in order to keep only the channels necessary\n to explain a given fraction of the total norm\n use_sparse_matrix_threshold: float\n If density of the templates is below a given threshold, sparse matrix\n are used (memory efficient)\n progress_bar_steps: bool\n In order to display or not steps from the algorithm\n -----\n \"\"\"\n\n _default_params = {\n 'sparsify_threshold': 0.99,\n 'amplitudes' : [0.5, 1.5],\n 'use_sparse_matrix_threshold' : 0.25,\n 'noise_levels': None,\n 'random_chunk_kwargs': {},\n 'omp_min_sps' : 0.5,\n 'progess_bar_steps' : False,\n }\n\n @classmethod\n def _sparsify_template(cls, template, sparsify_threshold, noise_levels):\n\n is_silent = template.std(0) < 0.25*noise_levels\n template[:, is_silent] = 0\n\n channel_norms = np.linalg.norm(template, axis=0)**2\n total_norm = np.linalg.norm(template)**2\n\n idx = np.argsort(channel_norms)[::-1]\n explained_norms = np.cumsum(channel_norms[idx]/total_norm)\n channel = np.searchsorted(explained_norms, sparsify_threshold)\n active_channels = np.sort(idx[:channel])\n template[:, idx[channel:]] = 0\n return template, active_channels\n\n @classmethod\n def _prepare_templates(cls, d):\n \n waveform_extractor = d['waveform_extractor']\n nb_samples = d['nb_samples']\n nb_channels = d['nb_channels']\n nb_templates = d['nb_templates']\n use_sparse_matrix_threshold = d['use_sparse_matrix_threshold']\n\n d['norms'] = np.zeros(nb_templates, dtype=np.float32)\n\n all_units = list(d['waveform_extractor'].sorting.unit_ids)\n\n templates = waveform_extractor.get_all_templates(mode='median').copy()\n\n d['sparsities'] = {}\n\n for count, unit_id in enumerate(all_units):\n \n templates[count], active_channels = cls._sparsify_template(templates[count], d['sparsify_threshold'], d['noise_levels'])\n d['sparsities'][count] = active_channels\n \n d['norms'][count] = np.linalg.norm(templates[count])\n 
templates[count] /= d['norms'][count]\n\n templates = templates.reshape(nb_templates, -1)\n\n nnz = np.sum(templates != 0)/(nb_templates * nb_samples * nb_channels)\n if nnz <= use_sparse_matrix_threshold:\n templates = scipy.sparse.csr_matrix(templates)\n print(f'Templates are automatically sparsified (sparsity level is {nnz})')\n d['is_dense'] = False\n else:\n d['is_dense'] = True\n\n d['templates'] = templates\n\n return d\n\n @classmethod\n def _prepare_overlaps(cls, d):\n\n templates = d['templates']\n nb_samples = d['nb_samples']\n nb_channels = d['nb_channels']\n nb_templates = d['nb_templates']\n is_dense = d['is_dense']\n\n if not is_dense:\n dense_templates = templates.toarray()\n else:\n dense_templates = templates\n\n dense_templates = dense_templates.reshape(nb_templates, nb_samples, nb_channels)\n\n size = 2 * nb_samples - 1\n\n all_delays = list(range(nb_samples))\n if d['progess_bar_steps']:\n all_delays = tqdm(all_delays, desc='[1] compute overlaps')\n\n overlaps = {}\n \n for delay in all_delays:\n source = dense_templates[:, :delay, :].reshape(nb_templates, -1)\n target = dense_templates[:, nb_samples-delay:, :].reshape(nb_templates, -1)\n\n if delay > 0:\n overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T))\n else:\n overlaps[delay] = scipy.sparse.csr_matrix((nb_templates, nb_templates), dtype=np.float32)\n \n if delay < nb_samples:\n overlaps[size - delay-1] = overlaps[delay].T.tocsr()\n\n new_overlaps = []\n for i in range(nb_templates):\n data = [overlaps[j][i, :].T for j in range(size)]\n data = scipy.sparse.hstack(data)\n new_overlaps += [data]\n \n d['overlaps'] = new_overlaps\n\n return d\n\n @classmethod\n def initialize_and_check_kwargs(cls, recording, kwargs):\n\n d = cls._default_params.copy()\n d.update(kwargs)\n\n assert isinstance(d['waveform_extractor'], WaveformExtractor)\n\n for v in ['sparsify_threshold', 'omp_min_sps','use_sparse_matrix_threshold']:\n assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]'\n \n if d['noise_levels'] is None:\n print('CircusOMPPeeler : noise should be computed outside')\n d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])\n\n d['nb_channels'] = d['waveform_extractor'].recording.get_num_channels()\n d['nb_samples'] = d['waveform_extractor'].nsamples\n d['nb_templates'] = len(d['waveform_extractor'].sorting.unit_ids)\n d['nbefore'] = d['waveform_extractor'].nbefore\n d['nafter'] = d['waveform_extractor'].nafter\n\n d = cls._prepare_templates(d)\n d = cls._prepare_overlaps(d)\n\n return d \n\n @classmethod\n def serialize_method_kwargs(cls, kwargs):\n kwargs = dict(kwargs)\n # remove waveform_extractor\n kwargs.pop('waveform_extractor')\n return kwargs\n\n @classmethod\n def unserialize_in_worker(cls, kwargs):\n return kwargs\n\n @classmethod\n def get_margin(cls, recording, kwargs):\n margin = 2 * max(kwargs['nbefore'], kwargs['nafter'])\n return margin\n\n @classmethod\n def main_function(cls, traces, d):\n templates = d['templates']\n nb_templates = d['nb_templates']\n nb_channels = d['nb_channels']\n overlaps = d['overlaps']\n margin = d['margin']\n norms = d['norms']\n nbefore = d['nbefore']\n nafter = d['nafter']\n omp_tol = np.finfo(np.float32).eps\n omp_min_sps = d['omp_min_sps']\n nb_samples = d['nafter'] + d['nbefore']\n neighbor_window = nb_samples - 1\n min_amplitude, max_amplitude = d['amplitudes']\n sparsities = d['sparsities']\n is_dense = d['is_dense']\n\n stop_criteria = omp_min_sps * norms[:, np.newaxis]\n\n nb_peaks = len(traces) - nb_samples + 1\n\n if 
is_dense:\n kernel_filters = templates.reshape(nb_templates, nb_samples, nb_channels)[:, ::-1, :]\n scalar_products = scipy.signal.fftconvolve(kernel_filters, traces[np.newaxis, :, :], axes=(0, 1), mode='valid').sum(2)\n else:\n scalar_products = np.empty((nb_templates, nb_peaks), dtype=np.float32)\n\n for i in range(nb_templates):\n kernel_filter = templates[i].toarray().reshape(nb_samples, nb_channels)\n kernel_filter = kernel_filter[::-1, sparsities[i]]\n\n convolution = scipy.signal.fftconvolve(kernel_filter, traces[:, sparsities[i]], axes=0, mode='valid')\n if len(convolution) > 0:\n scalar_products[i] = convolution.sum(1)\n else:\n scalar_products[i] = 0\n\n peak_chan_ind = np.zeros(nb_peaks)\n\n nb_spikes = 0\n spikes = np.empty(scalar_products.size, dtype=spike_dtype)\n idx_lookup = np.arange(scalar_products.size).reshape(nb_templates, -1)\n\n M = np.zeros((nb_peaks, nb_peaks), dtype=np.float32)\n\n all_selections = np.empty((2, scalar_products.size), dtype=np.int32)\n res_sps = np.zeros(0, dtype=np.float32)\n final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32)\n nb_selection = 0\n\n full_sps = scalar_products.copy()\n\n neighbors = {}\n cached_overlaps = {}\n\n is_valid = (scalar_products > stop_criteria)\n\n while np.any(is_valid):\n\n best_amplitude_ind = scalar_products[is_valid].argmax()\n best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)\n \n all_selections[:, nb_selection] = [best_cluster_ind, peak_index]\n nb_selection += 1\n\n selection = all_selections[:, :nb_selection]\n\n res_sps = full_sps[selection[0], selection[1]]\n\n mb_selection = nb_selection - 1\n\n delta_t = selection[1] - peak_index\n idx = np.where(np.abs(delta_t) <= neighbor_window)[0]\n\n myline = neighbor_window + delta_t[idx]\n if best_cluster_ind not in cached_overlaps.keys():\n cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()\n\n M[mb_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline]\n\n if nb_selection >= (M.shape[0] - 1):\n Z = np.zeros((2*M.shape[0], 2*M.shape[1]), dtype=np.float32)\n Z[:nb_selection, :nb_selection] = M[:nb_selection, :nb_selection]\n M = Z\n\n if mb_selection > 0:\n scipy.linalg.solve_triangular(M[:mb_selection, :mb_selection], M[mb_selection, :mb_selection], trans=0,\n lower=1,\n overwrite_b=True,\n check_finite=False)\n\n v = nrm2(M[mb_selection, :mb_selection]) ** 2\n if 1 - v <= omp_tol: # selected atoms are dependent\n break\n M[mb_selection, mb_selection] = np.sqrt(1 - v)\n\n all_amplitudes, _ = potrs(M[:nb_selection, :nb_selection], res_sps,\n lower=True, overwrite_b=False)\n\n all_amplitudes /= norms[selection[0]]\n\n diff_amplitudes = (all_amplitudes - final_amplitudes[selection[0], selection[1]])\n modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0]\n final_amplitudes[selection[0], selection[1]] = all_amplitudes\n\n for i in modified:\n\n tmp_best, tmp_peak = selection[:, i]\n diff_amp = diff_amplitudes[i]*norms[tmp_best]\n \n if not tmp_best in cached_overlaps.keys():\n cached_overlaps[tmp_best] = overlaps[tmp_best].toarray()\n\n if not tmp_peak in neighbors.keys():\n idx = [max(0, tmp_peak - neighbor_window), min(nb_peaks, tmp_peak + neighbor_window + 1)]\n offset = [neighbor_window + idx[0] - tmp_peak, neighbor_window + idx[1] - tmp_peak]\n neighbors[tmp_peak] = {'idx' : idx, 'tdx' : offset}\n\n idx = neighbors[tmp_peak]['idx']\n tdx = neighbors[tmp_peak]['tdx']\n\n to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0]:tdx[1]]\n 
scalar_products[:, idx[0]:idx[1]] -= to_add\n\n scalar_products[best_cluster_ind, peak_index] = -np.inf\n \n is_valid = (scalar_products > stop_criteria)\n\n is_valid = (final_amplitudes > min_amplitude)*(final_amplitudes < max_amplitude)\n valid_indices = np.where(is_valid)\n\n nb_spikes = len(valid_indices[0])\n spikes['sample_ind'][:nb_spikes] = valid_indices[1] + d['nbefore']\n spikes['channel_ind'][:nb_spikes] = 0\n spikes['cluster_ind'][:nb_spikes] = valid_indices[0]\n spikes['amplitude'][:nb_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]]\n \n spikes = spikes[:nb_spikes]\n order = np.argsort(spikes['sample_ind'])\n spikes = spikes[order]\n\n return spikes\n\n\nclass CircusPeeler(BaseTemplateMatchingEngine):\n\n \"\"\"\n Greedy Template-matching ported from the Spyking Circus sorter\n\n https://elifesciences.org/articles/34518\n \n This is a Greedy Template Matching algorithm. The idea is to detect \n all the peaks (negative, positive or both) above a certain threshold\n Then, at every peak (plus or minus some jitter) we look if the signal \n can be explained with a scaled template. \n The amplitudes allowed, for every templates, are automatically adjusted \n in an optimal manner, to enhance the Matthew Correlation Coefficient \n between all spikes/templates in the waveformextractor. For speed and \n memory optimization, templates are automatically sparsified if the \n density of the matrix falls below a given threshold \n\n Parameters\n ----------\n peak_sign: str\n Sign of the peak (neg, pos, or both)\n n_shifts: int\n The number of samples before/after to classify a peak (should be low)\n jitter: int\n The number of samples considered before/after every peak to search for\n matches\n detect_threshold: int\n The detection threshold\n noise_levels: array\n The noise levels, for every channels\n random_chunk_kwargs: dict\n Parameters for computing noise levels, if not provided (sub optimal)\n max_amplitude: float\n Maximal amplitude allowed for every template\n min_amplitude: float\n Minimal amplitude allowed for every template\n sparsify_threshold: float\n Templates are sparsified in order to keep only the channels necessary\n to explain a given fraction of the total norm\n use_sparse_matrix_threshold: float\n If density of the templates is below a given threshold, sparse matrix\n are used (memory efficient)\n progress_bar_steps: bool\n In order to display or not steps from the algorithm\n -----\n\n\n \"\"\"\n\n _default_params = {\n 'peak_sign': 'neg', \n 'n_shifts': 1, \n 'jitter' : 1, \n 'detect_threshold': 5, \n 'noise_levels': None, \n 'random_chunk_kwargs': {},\n 'sparsify_threshold': 0.99,\n 'max_amplitude' : 1.5,\n 'min_amplitude' : 0.5,\n 'use_sparse_matrix_threshold' : 0.25,\n 'progess_bar_steps' : True,\n }\n\n @classmethod\n def _sparsify_template(cls, template, sparsify_threshold, noise_levels):\n\n is_silent = template.std(0) < 0.25*noise_levels\n template[:, is_silent] = 0\n\n channel_norms = np.linalg.norm(template, axis=0)**2\n total_norm = np.linalg.norm(template)**2\n\n idx = np.argsort(channel_norms)[::-1]\n explained_norms = np.cumsum(channel_norms[idx]/total_norm)\n channel = np.searchsorted(explained_norms, sparsify_threshold)\n active_channels = np.sort(idx[:channel])\n template[:, idx[channel:]] = 0\n return template, active_channels\n\n @classmethod\n def _prepare_templates(cls, d):\n \n waveform_extractor = d['waveform_extractor']\n nb_samples = d['nb_samples']\n nb_channels = d['nb_channels']\n nb_templates = d['nb_templates']\n 
max_amplitude = d['max_amplitude']\n min_amplitude = d['min_amplitude']\n use_sparse_matrix_threshold = d['use_sparse_matrix_threshold']\n\n d['norms'] = np.zeros(nb_templates, dtype=np.float32)\n\n all_units = list(d['waveform_extractor'].sorting.unit_ids)\n\n templates = waveform_extractor.get_all_templates(mode='median').copy()\n\n d['sparsities'] = {}\n \n for count, unit_id in enumerate(all_units):\n \n templates[count], active_channels = cls._sparsify_template(templates[count], d['sparsify_threshold'], d['noise_levels'])\n d['sparsities'][count] = active_channels\n \n d['norms'][count] = np.linalg.norm(templates[count])\n templates[count] /= d['norms'][count]\n\n templates = templates.reshape(nb_templates, -1)\n\n nnz = np.sum(templates != 0)/(nb_templates * nb_samples * nb_channels)\n if nnz <= use_sparse_matrix_threshold:\n templates = scipy.sparse.csr_matrix(templates)\n print(f'Templates are automatically sparsified (sparsity level is {nnz})')\n d['is_dense'] = False\n else:\n d['is_dense'] = True\n\n d['templates'] = templates\n\n return d\n\n @classmethod\n def _prepare_overlaps(cls, d):\n\n templates = d['templates']\n nb_samples = d['nb_samples']\n nb_channels = d['nb_channels']\n nb_templates = d['nb_templates']\n is_dense = d['is_dense']\n\n if not is_dense:\n dense_templates = templates.toarray()\n else:\n dense_templates = templates\n\n dense_templates = dense_templates.reshape(nb_templates, nb_samples, nb_channels)\n\n size = 2 * nb_samples - 1\n\n all_delays = list(range(nb_samples))\n if d['progess_bar_steps']:\n all_delays = tqdm(all_delays, desc='[1] compute overlaps')\n\n overlaps = {}\n \n for delay in all_delays:\n source = dense_templates[:, :delay, :].reshape(nb_templates, -1)\n target = dense_templates[:, nb_samples-delay:, :].reshape(nb_templates, -1)\n\n if delay > 0:\n overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T))\n else:\n overlaps[delay] = scipy.sparse.csr_matrix((nb_templates, nb_templates), dtype=np.float32)\n \n if delay < nb_samples:\n overlaps[size - delay-1] = overlaps[delay].T.tocsr()\n\n new_overlaps = []\n for i in range(nb_templates):\n data = [overlaps[j][i, :].T for j in range(size)]\n data = scipy.sparse.hstack(data)\n new_overlaps += [data]\n \n d['overlaps'] = new_overlaps\n\n return d\n\n @classmethod\n def _mcc_error(cls, bounds, good, bad):\n fn = np.sum((good < bounds[0]) | (good > bounds[1]))\n fp = np.sum((bounds[0] <= bad) & (bad <= bounds[1]))\n tp = np.sum((bounds[0] <= good) & (good <= bounds[1]))\n tn = np.sum((bad < bounds[0]) | (bad > bounds[1]))\n denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)\n if denom > 0:\n mcc = 1 - (tp*tn - fp*fn)/np.sqrt(denom)\n else:\n mcc = 1\n return mcc\n\n @classmethod\n def _cost_function_mcc(cls, bounds, good, bad, delta_amplitude, alpha):\n # We want a minimal error, with the larger bounds that are possible\n cost = alpha*cls._mcc_error(bounds, good, bad) + (1 - alpha)*np.abs((1 - (bounds[1] - bounds[0])/delta_amplitude))\n return cost\n\n @classmethod\n def _optimize_amplitudes(cls, noise_snippets, d):\n\n waveform_extractor = d['waveform_extractor']\n templates = d['templates']\n nb_templates = d['nb_templates']\n max_amplitude = d['max_amplitude']\n min_amplitude = d['min_amplitude']\n alpha = 0.5\n norms = d['norms']\n all_units = list(waveform_extractor.sorting.unit_ids)\n if d['progess_bar_steps']:\n all_units = tqdm(all_units, desc='[2] compute amplitudes')\n\n d['amplitudes'] = np.zeros((nb_templates, 2), dtype=np.float32)\n noise = templates.dot(noise_snippets)/norms[:, 
np.newaxis]\n\n all_amps = {}\n for count, unit_id in enumerate(all_units):\n w = waveform_extractor.get_waveforms(unit_id)\n snippets = w.reshape(w.shape[0], -1).T\n amps = templates.dot(snippets)/norms[:, np.newaxis]\n good = amps[count, :].flatten()\n\n sub_amps = amps[np.concatenate((np.arange(count), np.arange(count+1, nb_templates))), :]\n bad = sub_amps[sub_amps >= good]\n bad = np.concatenate((bad, noise[count]))\n cost_kwargs = [good, bad, max_amplitude - min_amplitude, alpha]\n cost_bounds = [(min_amplitude, 1), (1, max_amplitude)]\n res = scipy.optimize.differential_evolution(cls._cost_function_mcc, bounds=cost_bounds, args=cost_kwargs)\n d['amplitudes'][count] = res.x\n\n # import pylab as plt\n # plt.hist(good, 100, alpha=0.5)\n # plt.hist(bad, 100, alpha=0.5)\n # plt.hist(noise[count], 100, alpha=0.5)\n # ymin, ymax = plt.ylim()\n # plt.plot([res.x[0], res.x[0]], [ymin, ymax], 'k--')\n # plt.plot([res.x[1], res.x[1]], [ymin, ymax], 'k--')\n # plt.savefig('test_%d.png' %count)\n # plt.close()\n\n return d\n\n @classmethod\n def initialize_and_check_kwargs(cls, recording, kwargs):\n\n d = cls._default_params.copy()\n d.update(kwargs)\n\n assert isinstance(d['waveform_extractor'], WaveformExtractor)\n\n for v in ['sparsify_threshold', 'use_sparse_matrix_threshold']:\n assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]'\n \n d['nb_channels'] = d['waveform_extractor'].recording.get_num_channels()\n d['nb_samples'] = d['waveform_extractor'].nsamples\n d['nb_templates'] = len(d['waveform_extractor'].sorting.unit_ids)\n\n if d['noise_levels'] is None:\n print('CircusPeeler : noise should be computed outside')\n d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])\n\n d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']\n\n d = cls._prepare_templates(d)\n d = cls._prepare_overlaps(d)\n\n d['nbefore'] = d['waveform_extractor'].nbefore\n d['nafter'] = d['waveform_extractor'].nafter\n d['patch_sizes'] = (d['waveform_extractor'].nsamples, d['nb_channels'])\n d['sym_patch'] = d['nbefore'] == d['nafter']\n #d['jitter'] = int(1e-3*d['jitter'] * recording.get_sampling_frequency())\n\n nb_segments = recording.get_num_segments()\n if d['waveform_extractor']._params['max_spikes_per_unit'] is None:\n nb_snippets = 1000\n else:\n nb_snippets = 2*d['waveform_extractor']._params['max_spikes_per_unit']\n\n nb_chunks = nb_snippets // nb_segments\n noise_snippets = get_random_data_chunks(recording, num_chunks_per_segment=nb_chunks, chunk_size=d['nb_samples'], seed=42)\n noise_snippets = noise_snippets.reshape(nb_chunks, d['nb_samples'], d['nb_channels']).reshape(nb_chunks, -1).T\n d = cls._optimize_amplitudes(noise_snippets, d)\n\n return d \n\n @classmethod\n def serialize_method_kwargs(cls, kwargs):\n kwargs = dict(kwargs)\n # remove waveform_extractor\n kwargs.pop('waveform_extractor')\n return kwargs\n\n @classmethod\n def unserialize_in_worker(cls, kwargs):\n return kwargs\n\n @classmethod\n def get_margin(cls, recording, kwargs):\n margin = 2 * max(kwargs['nbefore'], kwargs['nafter'])\n return margin\n\n @classmethod\n def main_function(cls, traces, d):\n peak_sign = d['peak_sign']\n abs_threholds = d['abs_threholds']\n n_shifts = d['n_shifts']\n templates = d['templates']\n nb_templates = d['nb_templates']\n nb_channels = d['nb_channels']\n overlaps = d['overlaps']\n margin = d['margin']\n norms = d['norms']\n jitter = d['jitter']\n patch_sizes = d['patch_sizes']\n nb_samples = d['nafter'] + d['nbefore']\n neighbor_window = nb_samples - 1\n amplitudes 
= d['amplitudes']\n sym_patch = d['sym_patch']\n sparsities = d['sparsities']\n is_dense = d['is_dense']\n \n peak_traces = traces[margin // 2:-margin // 2, :]\n peak_sample_ind, peak_chan_ind = detect_peaks_by_channel(peak_traces, peak_sign, abs_threholds, n_shifts)\n\n if jitter > 0:\n jittered_peaks = peak_sample_ind[:, np.newaxis] + np.arange(-jitter, jitter)\n jittered_channels = peak_chan_ind[:, np.newaxis] + np.zeros(2*jitter)\n mask = (jittered_peaks > 0) & (jittered_peaks < len(peak_traces))\n jittered_peaks = jittered_peaks[mask]\n jittered_channels = jittered_channels[mask]\n peak_sample_ind, unique_idx = np.unique(jittered_peaks, return_index=True)\n peak_chan_ind = jittered_channels[unique_idx]\n else:\n peak_sample_ind, unique_idx = np.unique(peak_sample_ind, return_index=True)\n peak_chan_ind = peak_chan_ind[unique_idx]\n\n nb_peaks = len(peak_sample_ind)\n\n if sym_patch:\n snippets = extract_patches_2d(traces, patch_sizes)[peak_sample_ind]\n peak_sample_ind += margin // 2\n else:\n peak_sample_ind += margin // 2\n snippet_window = np.arange(-d['nbefore'], d['nafter'])\n snippets = traces[peak_sample_ind[:, np.newaxis] + snippet_window]\n\n if nb_peaks > 0:\n snippets = snippets.reshape(nb_peaks, -1)\n scalar_products = templates.dot(snippets.T)\n else:\n scalar_products = np.zeros((nb_templates, 0), dtype=np.float32)\n\n nb_spikes = 0\n spikes = np.empty(scalar_products.size, dtype=spike_dtype)\n idx_lookup = np.arange(scalar_products.size).reshape(nb_templates, -1)\n\n min_sps = (amplitudes[:, 0] * norms)[:, np.newaxis]\n max_sps = (amplitudes[:, 1] * norms)[:, np.newaxis]\n\n is_valid = (scalar_products > min_sps) & (scalar_products < max_sps)\n\n cached_overlaps = {}\n\n while np.any(is_valid):\n\n best_amplitude_ind = scalar_products[is_valid].argmax()\n best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)\n\n best_amplitude = scalar_products[best_cluster_ind, peak_index]\n best_peak_sample_ind = peak_sample_ind[peak_index]\n best_peak_chan_ind = peak_chan_ind[peak_index]\n\n peak_data = peak_sample_ind - peak_sample_ind[peak_index]\n is_valid = np.searchsorted(peak_data, [-neighbor_window, neighbor_window + 1])\n idx_neighbor = peak_data[is_valid[0]:is_valid[1]] + neighbor_window\n\n if not best_cluster_ind in cached_overlaps.keys():\n cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()\n\n to_add = -best_amplitude * cached_overlaps[best_cluster_ind][:, idx_neighbor]\n\n scalar_products[:, is_valid[0]:is_valid[1]] += to_add\n scalar_products[best_cluster_ind, is_valid[0]:is_valid[1]] = -np.inf\n\n spikes['sample_ind'][nb_spikes] = best_peak_sample_ind\n spikes['channel_ind'][nb_spikes] = best_peak_chan_ind\n spikes['cluster_ind'][nb_spikes] = best_cluster_ind\n spikes['amplitude'][nb_spikes] = best_amplitude\n nb_spikes += 1\n\n is_valid = (scalar_products > min_sps) & (scalar_products < max_sps)\n\n spikes['amplitude'][:nb_spikes] /= norms[spikes['cluster_ind'][:nb_spikes]]\n \n spikes = spikes[:nb_spikes]\n order = np.argsort(spikes['sample_ind'])\n spikes = spikes[order]\n\n return spikes\n\n\n\ntemplate_matching_methods = {\n 'naive' : NaiveMatching,\n 'tridesclous' : TridesclousPeeler,\n 'circus' : CircusPeeler,\n 'circus-omp' : CircusOMPPeeler\n}\n" ]
[ [ "scipy.linalg.get_blas_funcs", "numpy.sqrt", "numpy.cumsum", "numpy.concatenate", "numpy.argmin", "numpy.any", "numpy.searchsorted", "numpy.where", "scipy.linalg.solve_triangular", "scipy.optimize.differential_evolution", "numpy.unique", "numpy.arange", "numpy.finfo", "numpy.zeros", "numpy.unravel_index", "sklearn.feature_extraction.image.extract_patches_2d", "numpy.nonzero", "scipy.signal.fftconvolve", "scipy.spatial.distance.cdist", "scipy.sparse.csr_matrix", "numpy.argsort", "scipy.sparse.hstack", "numpy.array", "numpy.sum", "scipy.linalg.get_lapack_funcs", "numpy.abs", "numpy.linalg.norm", "numpy.sort", "numpy.empty" ] ]
18bce1151/proj
[ "96c0a299ccaec29a02a9486d192a7215f5a12566" ]
[ "Diabetes_API/app.py" ]
[ "from flask import Flask, render_template, url_for, flash, redirect\r\nimport joblib\r\nfrom flask import request\r\nimport numpy as np\r\n\r\napp = Flask(__name__, template_folder='templates')\r\n\r\[email protected](\"/\")\r\n\r\[email protected](\"/Diabetes\")\r\ndef cancer():\r\n return render_template(\"diabetes.html\")\r\n\r\ndef ValuePredictor(to_predict_list, size):\r\n to_predict = np.array(to_predict_list).reshape(1,size)\r\n if(size==6):\r\n loaded_model = joblib.load(r'C:\\Users\\Mahesh Sharma\\Desktop\\HealthApp\\Indivisual_Deployment\\Diabetes_API\\diabetes_model.pkl')\r\n result = loaded_model.predict(to_predict)\r\n return result[0]\r\n\r\[email protected]('/predict', methods = [\"POST\"])\r\ndef predict():\r\n if request.method == \"POST\":\r\n to_predict_list = request.form.to_dict()\r\n to_predict_list = list(to_predict_list.values())\r\n to_predict_list = list(map(float, to_predict_list))\r\n #diabetes\r\n if(len(to_predict_list)==6):\r\n result = ValuePredictor(to_predict_list,6)\r\n \r\n if(int(result)==1):\r\n prediction = \"Sorry you chances of getting the disease. Please consult the doctor immediately\"\r\n else:\r\n prediction = \"No need to fear. You have no dangerous symptoms of the disease\"\r\n return(render_template(\"result.html\", prediction_text=prediction)) \r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n" ]
[ [ "numpy.array" ] ]
ChrisQiqiang/mxnet-combination
[ "015c02f8fa1b22133202e1c70488c439cd9e726d" ]
[ "python/mxnet/base.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# coding: utf-8\n# pylint: disable=invalid-name, no-member, trailing-comma-tuple, bad-mcs-classmethod-argument, unnecessary-pass, too-many-lines, wrong-import-position\n\"\"\"ctypes library of mxnet and helper functions.\"\"\"\nfrom __future__ import absolute_import\n\nimport re\nimport atexit\nimport ctypes\nimport os\nimport sys\nimport inspect\nimport platform\nimport numpy as _np\n\nfrom . import libinfo\n\n__all__ = ['MXNetError']\n#----------------------------\n# library loading\n#----------------------------\n\n# pylint: disable=pointless-statement\ntry:\n basestring\n long\nexcept NameError:\n basestring = str\n long = int\n# pylint: enable=pointless-statement\n\ninteger_types = (int, long, _np.int32, _np.int64)\nnumeric_types = (float, int, long, _np.generic)\nstring_types = basestring,\n\nif sys.version_info[0] > 2:\n # this function is needed for python3\n # to convert ctypes.char_p .value back to python str\n py_str = lambda x: x.decode('utf-8')\nelse:\n py_str = lambda x: x\n\n\ndef data_dir_default():\n \"\"\"\n\n :return: default data directory depending on the platform and environment variables\n \"\"\"\n system = platform.system()\n if system == 'Windows':\n return os.path.join(os.environ.get('APPDATA'), 'mxnet')\n else:\n return os.path.join(os.path.expanduser(\"~\"), '.mxnet')\n\n\ndef data_dir():\n \"\"\"\n\n :return: data directory in the filesystem for storage, for example when downloading models\n \"\"\"\n return os.getenv('MXNET_HOME', data_dir_default())\n\n\nclass _NullType(object):\n \"\"\"Placeholder for arguments\"\"\"\n def __repr__(self):\n return '_Null'\n\n\n_Null = _NullType()\n\n\nclass MXNetError(Exception):\n \"\"\"Error that will be thrown by all mxnet functions.\"\"\"\n pass\n\n\nclass NotImplementedForSymbol(MXNetError):\n \"\"\"Error: Not implemented for symbol\"\"\"\n def __init__(self, function, alias, *args):\n super(NotImplementedForSymbol, self).__init__()\n self.function = function.__name__\n self.alias = alias\n self.args = [str(type(a)) for a in args]\n\n def __str__(self):\n msg = 'Function {}'.format(self.function)\n if self.alias:\n msg += ' (namely operator \"{}\")'.format(self.alias)\n if self.args:\n msg += ' with arguments ({})'.format(', '.join(self.args))\n msg += ' is not implemented for Symbol and only available in NDArray.'\n return msg\n\n\nclass NotSupportedForSparseNDArray(MXNetError):\n \"\"\"Error: Not supported for SparseNDArray\"\"\"\n def __init__(self, function, alias, *args):\n super(NotSupportedForSparseNDArray, self).__init__()\n self.function = function.__name__\n self.alias = alias\n self.args = [str(type(a)) for a in args]\n\n def __str__(self):\n msg = 'Function {}'.format(self.function)\n if self.alias:\n msg += ' (namely 
operator \"{}\")'.format(self.alias)\n if self.args:\n msg += ' with arguments ({})'.format(', '.join(self.args))\n msg += ' is not supported for SparseNDArray and only available in NDArray.'\n return msg\n\n\nclass MXCallbackList(ctypes.Structure):\n \"\"\"Structure that holds Callback information. Passed to CustomOpProp.\"\"\"\n _fields_ = [\n ('num_callbacks', ctypes.c_int),\n ('callbacks', ctypes.POINTER(ctypes.CFUNCTYPE(ctypes.c_int))),\n ('contexts', ctypes.POINTER(ctypes.c_void_p))\n ]\n\n\n# Please see: https://stackoverflow.com/questions/5189699/how-to-make-a-class-property\nclass _MXClassPropertyDescriptor(object):\n def __init__(self, fget, fset=None):\n self.fget = fget\n self.fset = fset\n\n def __get__(self, obj, clas=None):\n if clas is None:\n clas = type(obj)\n return self.fget.__get__(obj, clas)()\n\n def __set__(self, obj, value):\n if not self.fset:\n raise MXNetError(\"cannot use the setter: %s to set attribute\" % obj.__name__)\n if inspect.isclass(obj):\n type_ = obj\n obj = None\n else:\n type_ = type(obj)\n return self.fset.__get__(obj, type_)(value)\n\n def setter(self, func):\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n self.fset = func\n return self\n\n\nclass _MXClassPropertyMetaClass(type):\n def __setattr__(cls, key, value):\n obj = cls.__dict__.get(key)\n if obj and isinstance(obj, _MXClassPropertyDescriptor):\n return obj.__set__(cls, value)\n\n return super(_MXClassPropertyMetaClass, cls).__setattr__(key, value)\n\n\n# with_metaclass function obtained from: https://github.com/benjaminp/six/blob/master/six.py\n# pylint: disable=unused-argument\ndef with_metaclass(meta, *bases):\n \"\"\"Create a base class with a metaclass.\"\"\"\n # This requires a bit of explanation: the basic idea is to make a dummy\n # metaclass for one level of class instantiation that replaces itself with\n # the actual metaclass.\n class metaclass(type):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n\n @classmethod\n def __prepare__(cls, name, this_bases):\n return meta.__prepare__(name, bases)\n return type.__new__(metaclass, 'temporary_class', (), {})\n# pylint: enable=unused-argument\n\n\ndef classproperty(func):\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n\n return _MXClassPropertyDescriptor(func)\n\n\ndef _load_lib():\n \"\"\"Load library by searching possible path.\"\"\"\n lib_path = libinfo.find_lib_path()\n lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)\n # DMatrix functions\n lib.MXGetLastError.restype = ctypes.c_char_p\n return lib\n\n\n# version number\n__version__ = libinfo.__version__\n# library instance of mxnet\n_LIB = _load_lib()\n\n# type definitions\nmx_int = ctypes.c_int\nmx_uint = ctypes.c_uint\nmx_int64 = ctypes.c_int64\nmx_float = ctypes.c_float\nmx_float_p = ctypes.POINTER(mx_float)\nmx_real_t = _np.float32\nNDArrayHandle = ctypes.c_void_p\nFunctionHandle = ctypes.c_void_p\nOpHandle = ctypes.c_void_p\nCachedOpHandle = ctypes.c_void_p\nSymbolHandle = ctypes.c_void_p\nExecutorHandle = ctypes.c_void_p\nDataIterCreatorHandle = ctypes.c_void_p\nDataIterHandle = ctypes.c_void_p\nKVStoreHandle = ctypes.c_void_p\nRecordIOHandle = ctypes.c_void_p\nRtcHandle = ctypes.c_void_p\nCudaModuleHandle = ctypes.c_void_p\nCudaKernelHandle = ctypes.c_void_p\nProfileHandle = ctypes.c_void_p\nDLPackHandle = ctypes.c_void_p\n\n\n#----------------------------\n# helper function definition\n#----------------------------\ndef check_call(ret):\n \"\"\"Check the return value of C 
API call.\n\n This function will raise an exception when an error occurs.\n Wrap every API call with this function.\n\n Parameters\n ----------\n ret : int\n return value from API calls.\n \"\"\"\n if ret != 0:\n raise MXNetError(py_str(_LIB.MXGetLastError()))\n\n\nif sys.version_info[0] < 3:\n def c_str(string):\n \"\"\"Create ctypes char * from a Python string.\n\n Parameters\n ----------\n string : string type\n Python string.\n\n Returns\n -------\n str : c_char_p\n A char pointer that can be passed to C API.\n\n Examples\n --------\n >>> x = mx.base.c_str(\"Hello, World\")\n >>> print x.value\n Hello, World\n \"\"\"\n return ctypes.c_char_p(string)\n\n def c_str_array(strings):\n \"\"\"Create ctypes const char ** from a list of Python strings.\n\n Parameters\n ----------\n strings : list of string\n Python strings.\n\n Returns\n -------\n (ctypes.c_char_p * len(strings))\n A const char ** pointer that can be passed to C API.\n \"\"\"\n arr = (ctypes.c_char_p * len(strings))()\n arr[:] = strings\n return arr\n\nelse:\n def c_str(string):\n \"\"\"Create ctypes char * from a Python string.\n\n Parameters\n ----------\n string : string type\n Python string.\n\n Returns\n -------\n str : c_char_p\n A char pointer that can be passed to C API.\n\n Examples\n --------\n >>> x = mx.base.c_str(\"Hello, World\")\n >>> print(x.value)\n b\"Hello, World\"\n \"\"\"\n return ctypes.c_char_p(string.encode('utf-8'))\n\n def c_str_array(strings):\n \"\"\"Create ctypes const char ** from a list of Python strings.\n\n Parameters\n ----------\n strings : list of string\n Python strings.\n\n Returns\n -------\n (ctypes.c_char_p * len(strings))\n A const char ** pointer that can be passed to C API.\n \"\"\"\n arr = (ctypes.c_char_p * len(strings))()\n arr[:] = [s.encode('utf-8') for s in strings]\n return arr\n\n\ndef c_array(ctype, values):\n \"\"\"Create ctypes array from a Python array.\n\n Parameters\n ----------\n ctype : ctypes data type\n Data type of the array we want to convert to, such as mx_float.\n\n values : tuple or list\n Data content.\n\n Returns\n -------\n out : ctypes array\n Created ctypes array.\n\n Examples\n --------\n >>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])\n >>> print len(x)\n 3\n >>> x[1]\n 2.0\n \"\"\"\n out = (ctype * len(values))()\n out[:] = values\n return out\n\n\ndef c_array_buf(ctype, buf):\n \"\"\"Create ctypes array from a Python buffer.\n For primitive types, using the buffer created with array.array is faster\n than a c_array call.\n\n Parameters\n ----------\n ctype : ctypes data type\n Data type of the array we want to convert to, such as mx_float.\n\n buf : buffer type\n Data content.\n\n Returns\n -------\n out : ctypes array\n Created ctypes array.\n\n Examples\n --------\n >>> x = mx.base.c_array_buf(mx.base.mx_float, array.array('i', [1, 2, 3]))\n >>> print len(x)\n 3\n >>> x[1]\n 2.0\n \"\"\"\n return (ctype * len(buf)).from_buffer(buf)\n\n\ndef c_handle_array(objs):\n \"\"\"Create ctypes const void ** from a list of MXNet objects with handles.\n\n Parameters\n ----------\n objs : list of NDArray/Symbol.\n MXNet objects.\n\n Returns\n -------\n (ctypes.c_void_p * len(objs))\n A void ** pointer that can be passed to C API.\n \"\"\"\n arr = (ctypes.c_void_p * len(objs))()\n arr[:] = [o.handle for o in objs]\n return arr\n\n\ndef ctypes2buffer(cptr, length):\n \"\"\"Convert ctypes pointer to buffer type.\n\n Parameters\n ----------\n cptr : ctypes.POINTER(ctypes.c_char)\n Pointer to the raw memory region.\n length : int\n The length of the buffer.\n\n 
Returns\n -------\n buffer : bytearray\n The raw byte memory buffer.\n \"\"\"\n if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):\n raise TypeError('expected char pointer')\n res = bytearray(length)\n rptr = (ctypes.c_char * length).from_buffer(res)\n if not ctypes.memmove(rptr, cptr, length):\n raise RuntimeError('memmove failed')\n return res\n\n\ndef ctypes2numpy_shared(cptr, shape):\n \"\"\"Convert a ctypes pointer to a numpy array.\n\n The resulting NumPy array shares the memory with the pointer.\n\n Parameters\n ----------\n cptr : ctypes.POINTER(mx_float)\n pointer to the memory region\n\n shape : tuple\n Shape of target `NDArray`.\n\n Returns\n -------\n out : numpy_array\n A numpy array : numpy array.\n \"\"\"\n if not isinstance(cptr, ctypes.POINTER(mx_float)):\n raise RuntimeError('expected float pointer')\n size = 1\n for s in shape:\n size *= s\n dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))\n return _np.frombuffer(dbuffer, dtype=_np.float32).reshape(shape)\n\n\ndef build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):\n \"\"\"Build argument docs in python style.\n\n arg_names : list of str\n Argument names.\n\n arg_types : list of str\n Argument type information.\n\n arg_descs : list of str\n Argument description information.\n\n remove_dup : boolean, optional\n Whether remove duplication or not.\n\n Returns\n -------\n docstr : str\n Python docstring of parameter sections.\n \"\"\"\n param_keys = set()\n param_str = []\n for key, type_info, desc in zip(arg_names, arg_types, arg_descs):\n if key in param_keys and remove_dup:\n continue\n if key == 'num_args':\n continue\n param_keys.add(key)\n ret = '%s : %s' % (key, type_info)\n if len(desc) != 0:\n ret += '\\n ' + desc\n param_str.append(ret)\n doc_str = ('Parameters\\n' +\n '----------\\n' +\n '%s\\n')\n doc_str = doc_str % ('\\n'.join(param_str))\n return doc_str\n\n\ndef _notify_shutdown():\n \"\"\"Notify MXNet about a shutdown.\"\"\"\n check_call(_LIB.MXNotifyShutdown())\n\n\natexit.register(_notify_shutdown)\n\n\ndef add_fileline_to_docstring(module, incursive=True):\n \"\"\"Append the definition position to each function contained in module.\n\n Examples\n --------\n # Put the following codes at the end of a file\n add_fileline_to_docstring(__name__)\n \"\"\"\n\n def _add_fileline(obj):\n \"\"\"Add fileinto to a object.\n \"\"\"\n if obj.__doc__ is None or 'From:' in obj.__doc__:\n return\n fname = inspect.getsourcefile(obj)\n if fname is None:\n return\n try:\n line = inspect.getsourcelines(obj)[-1]\n except IOError:\n return\n obj.__doc__ += '\\n\\nFrom:%s:%d' % (fname, line)\n\n if isinstance(module, str):\n module = sys.modules[module]\n for _, obj in inspect.getmembers(module):\n if inspect.isbuiltin(obj):\n continue\n if inspect.isfunction(obj):\n _add_fileline(obj)\n if inspect.ismethod(obj):\n _add_fileline(obj.__func__)\n if inspect.isclass(obj) and incursive:\n add_fileline_to_docstring(obj, False)\n\n\ndef _as_list(obj):\n \"\"\"A utility function that converts the argument to a list if it is not already.\n\n Parameters\n ----------\n obj : object\n\n Returns\n -------\n If `obj` is a list or tuple, return it. 
Otherwise, return `[obj]` as a\n single-element list.\n\n \"\"\"\n if isinstance(obj, (list, tuple)):\n return obj\n else:\n return [obj]\n\n\n_OP_NAME_PREFIX_LIST = ['_contrib_', '_linalg_', '_sparse_', '_image_', '_random_']\n\n\ndef _get_op_name_prefix(op_name):\n \"\"\"\n Check whether the given op_name starts with any words in `_OP_NAME_PREFIX_LIST`.\n If found, return the prefix; else, return an empty string.\n \"\"\"\n for prefix in _OP_NAME_PREFIX_LIST:\n if op_name.startswith(prefix):\n return prefix\n return \"\"\n\n\n# pylint: enable=invalid-name\ndef _init_op_module(root_namespace, module_name, make_op_func):\n \"\"\"\n Registers op functions created by `make_op_func` under\n `root_namespace.module_name.[submodule_name]`,\n where `submodule_name` is one of `_OP_SUBMODULE_NAME_LIST`.\n\n Parameters\n ----------\n root_namespace : str\n Top level module name, `mxnet` in the current cases.\n module_name : str\n Second level module name, `ndarray` and `symbol` in the current cases.\n make_op_func : function\n Function for creating op functions for `ndarray` and `symbol` modules.\n \"\"\"\n plist = ctypes.POINTER(ctypes.c_char_p)()\n size = ctypes.c_uint()\n\n check_call(_LIB.MXListAllOpNames(ctypes.byref(size),\n ctypes.byref(plist)))\n op_names = []\n for i in range(size.value):\n op_name = py_str(plist[i])\n if not _is_np_op(op_name):\n op_names.append(op_name)\n\n module_op = sys.modules[\"%s.%s.op\" % (root_namespace, module_name)]\n module_internal = sys.modules[\"%s.%s._internal\" % (root_namespace, module_name)]\n # contrib module in the old format (deprecated)\n # kept here for backward compatibility\n # use mx.nd.contrib or mx.sym.contrib from now on\n contrib_module_name_old = \"%s.contrib.%s\" % (root_namespace, module_name)\n contrib_module_old = sys.modules[contrib_module_name_old]\n submodule_dict = {}\n for op_name_prefix in _OP_NAME_PREFIX_LIST:\n submodule_dict[op_name_prefix] =\\\n sys.modules[\"%s.%s.%s\" % (root_namespace, module_name, op_name_prefix[1:-1])]\n for name in op_names:\n hdl = OpHandle()\n check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))\n op_name_prefix = _get_op_name_prefix(name)\n module_name_local = module_name\n if len(op_name_prefix) > 0:\n if op_name_prefix != '_random_' or name.endswith('_like'):\n func_name = name[len(op_name_prefix):]\n cur_module = submodule_dict[op_name_prefix]\n module_name_local = \"%s.%s.%s\" % (root_namespace, module_name, op_name_prefix[1:-1])\n else:\n func_name = name\n cur_module = module_internal\n elif name.startswith('_'):\n func_name = name\n cur_module = module_internal\n else:\n func_name = name\n cur_module = module_op\n\n function = make_op_func(hdl, name, func_name)\n function.__module__ = module_name_local\n setattr(cur_module, function.__name__, function)\n cur_module.__all__.append(function.__name__)\n\n if op_name_prefix == '_contrib_':\n hdl = OpHandle()\n check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))\n func_name = name[len(op_name_prefix):]\n\n function = make_op_func(hdl, name, func_name)\n function.__module__ = contrib_module_name_old\n setattr(contrib_module_old, function.__name__, function)\n contrib_module_old.__all__.append(function.__name__)\n\n\ndef _generate_op_module_signature(root_namespace, module_name, op_code_gen_func):\n \"\"\"\n Generate op functions created by `op_code_gen_func` and write to the source file\n of `root_namespace.module_name.[submodule_name]`,\n where `submodule_name` is one of `_OP_SUBMODULE_NAME_LIST`.\n\n Parameters\n ----------\n 
root_namespace : str\n Top level module name, `mxnet` in the current cases.\n module_name : str\n Second level module name, `ndarray` and `symbol` in the current cases.\n op_code_gen_func : function\n Function for creating op functions for `ndarray` and `symbol` modules.\n \"\"\"\n def get_module_file(module_name):\n \"\"\"Return the generated module file based on module name.\"\"\"\n path = os.path.dirname(__file__)\n module_path = module_name.split('.')\n module_path[-1] = 'gen_' + module_path[-1]\n file_name = os.path.join(path, '..', *module_path) + '.py'\n module_file = open(file_name, 'w', encoding=\"utf-8\")\n dependencies = {'symbol': ['from ._internal import SymbolBase',\n 'from ..base import _Null'],\n 'ndarray': ['from ._internal import NDArrayBase',\n 'from ..base import _Null']}\n module_file.write('# coding: utf-8')\n module_file.write('# File content is auto-generated. Do not modify.' + os.linesep)\n module_file.write('# pylint: skip-file' + os.linesep)\n module_file.write(os.linesep.join(dependencies[module_name.split('.')[1]]))\n return module_file\n\n def write_all_str(module_file, module_all_list):\n \"\"\"Write the proper __all__ based on available operators.\"\"\"\n module_file.write(os.linesep)\n module_file.write(os.linesep)\n all_str = '__all__ = [' + ', '.join([\"'%s'\"%s for s in module_all_list]) + ']'\n module_file.write(all_str)\n\n plist = ctypes.POINTER(ctypes.c_char_p)()\n size = ctypes.c_uint()\n\n check_call(_LIB.MXListAllOpNames(ctypes.byref(size),\n ctypes.byref(plist)))\n op_names = []\n for i in range(size.value):\n op_name = py_str(plist[i])\n if not _is_np_op(op_name):\n op_names.append(op_name)\n\n module_op_file = get_module_file(\"%s.%s.op\" % (root_namespace, module_name))\n module_op_all = []\n module_internal_file = get_module_file(\"%s.%s._internal\"%(root_namespace, module_name))\n module_internal_all = []\n submodule_dict = {}\n for op_name_prefix in _OP_NAME_PREFIX_LIST:\n submodule_dict[op_name_prefix] =\\\n (get_module_file(\"%s.%s.%s\" % (root_namespace, module_name,\n op_name_prefix[1:-1])), [])\n for name in op_names:\n hdl = OpHandle()\n check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))\n op_name_prefix = _get_op_name_prefix(name)\n if len(op_name_prefix) > 0:\n func_name = name[len(op_name_prefix):]\n cur_module_file, cur_module_all = submodule_dict[op_name_prefix]\n elif name.startswith('_'):\n func_name = name\n cur_module_file = module_internal_file\n cur_module_all = module_internal_all\n else:\n func_name = name\n cur_module_file = module_op_file\n cur_module_all = module_op_all\n\n code, _ = op_code_gen_func(hdl, name, func_name, True)\n cur_module_file.write(os.linesep)\n cur_module_file.write(code)\n cur_module_all.append(func_name)\n\n for (submodule_f, submodule_all) in submodule_dict.values():\n write_all_str(submodule_f, submodule_all)\n submodule_f.close()\n write_all_str(module_op_file, module_op_all)\n module_op_file.close()\n write_all_str(module_internal_file, module_internal_all)\n module_internal_file.close()\n\nctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object\nctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p\n\n\n_NP_OP_PREFIX = '_np_'\n_NP_OP_SUBMODULE_LIST = ['_random_', '_linalg_']\n\n_NP_EXT_OP_PREFIX = '_npx_'\n_NP_EXT_OP_SUBMODULE_LIST = ['_image_']\n\n_NP_INTERNAL_OP_PREFIX = '_npi_'\n\n\ndef _is_np_op(op_name):\n return op_name.startswith(_NP_OP_PREFIX) or op_name.startswith(_NP_EXT_OP_PREFIX)\\\n or op_name.startswith(_NP_INTERNAL_OP_PREFIX)\n\n\ndef 
_get_op_submodule_name(op_name, op_name_prefix, submodule_name_list):\n \"\"\"Get the submodule name of a specific op\"\"\"\n assert op_name.startswith(op_name_prefix)\n for submodule_name in submodule_name_list:\n if op_name[len(op_name_prefix):].startswith(submodule_name):\n return submodule_name\n return \"\"\n\n\ndef _init_np_op_module(root_module_name, np_module_name, mx_module_name, make_op_func):\n \"\"\"\n Register numpy operators in namespaces `mxnet.numpy`, `mxnet.ndarray.numpy`\n and `mxnet.symbol.numpy`. They are used in imperative mode, Gluon APIs w/o hybridization,\n and Gluon APIs w/ hybridization, respectively. Essentially, operators with the same name\n registered in three namespaces, respectively share the same functionality in C++ backend.\n Different namespaces are needed for dispatching operator calls in Gluon's `HybridBlock` by `F`.\n\n Parameters\n ----------\n root_module_name : str\n Top level module name, `mxnet` in the current cases.\n np_module_name : str\n Second level module name, `numpy` or `numpy_extension` in the current case.\n make_op_func : function\n Function for creating op functions.\n \"\"\"\n from . import _numpy_op_doc as _np_op_doc\n if np_module_name == 'numpy':\n op_name_prefix = _NP_OP_PREFIX\n submodule_name_list = _NP_OP_SUBMODULE_LIST\n elif np_module_name == 'numpy_extension':\n op_name_prefix = _NP_EXT_OP_PREFIX\n submodule_name_list = _NP_EXT_OP_SUBMODULE_LIST\n elif np_module_name == 'numpy._internal':\n op_name_prefix = _NP_INTERNAL_OP_PREFIX\n submodule_name_list = []\n else:\n raise ValueError('unsupported np module name {}'.format(np_module_name))\n\n plist = ctypes.POINTER(ctypes.c_char_p)()\n size = ctypes.c_uint()\n check_call(_LIB.MXListAllOpNames(ctypes.byref(size), ctypes.byref(plist)))\n op_names = []\n for i in range(size.value):\n name = py_str(plist[i])\n if name.startswith(op_name_prefix):\n op_names.append(name)\n\n if mx_module_name is None:\n # register np/npx ops for imperative programming\n op_module_name = \"%s.%s._op\" % (root_module_name, np_module_name) # e.g. mxnet.numpy._op\n op_submodule_name = \"%s.%s\" % (root_module_name, np_module_name) # e.g. mxnet.numpy.random\n elif mx_module_name in ('ndarray', 'symbol'):\n # register numpy internal ops and np/npx ops for use in Gluon\n # np internal ops are registered in mxnet.ndarray/symbol.numpy._internal\n # np ops are registered in mxnet.ndarray/symbol.numpy._op\n # npx ops are registered in mxnet.ndarray/symbol.numpy_extension._op\n op_module_name = \"%s.%s.%s\" % (root_module_name, mx_module_name, np_module_name)\n if op_name_prefix != _NP_INTERNAL_OP_PREFIX:\n op_module_name += '._op'\n # e.g. 
mxnet.symbol.numpy.random\n op_submodule_name = \"%s.%s.%s\" % (root_module_name, mx_module_name, np_module_name)\n else:\n raise ValueError('unsupported mxnet module {}'.format(mx_module_name))\n op_submodule_name += '.%s'\n\n op_module = sys.modules[op_module_name]\n submodule_dict = {}\n for submodule_name in submodule_name_list:\n submodule_dict[submodule_name] = sys.modules[op_submodule_name % submodule_name[1:-1]]\n for name in op_names:\n hdl = OpHandle()\n check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))\n submodule_name = _get_op_submodule_name(name, op_name_prefix, submodule_name_list)\n if len(submodule_name) > 0:\n func_name = name[(len(op_name_prefix) + len(submodule_name)):]\n cur_module = submodule_dict[submodule_name]\n module_name_local = op_submodule_name % submodule_name[1:-1]\n else:\n func_name = name[len(op_name_prefix):]\n cur_module = op_module\n module_name_local =\\\n op_module_name[:-len('._op')] if op_module_name.endswith('._op') else op_module_name\n\n function = make_op_func(hdl, name, func_name)\n function.__module__ = module_name_local\n setattr(cur_module, function.__name__, function)\n cur_module.__all__.append(function.__name__)\n\n if hasattr(_np_op_doc, name):\n function.__doc__ = getattr(_np_op_doc, name).__doc__\n else:\n function.__doc__ = re.sub('NDArray', 'ndarray', function.__doc__)\n" ]
[ [ "numpy.frombuffer" ] ]
evemorgen/GdzieJestMojTramwajProject
[ "65a090ae4222053a2a0a1b145df5196f3658065c" ]
[ "backend/schedule_worker/utils/generate_graph.py" ]
[ "import os\nimport logging\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport json\nfrom geopy.distance import vincenty\nfrom collections import deque\n\nfrom db import MpkDb as DbApi\nfrom utils import Config\n\n\ndef czy_skrzyzowanie(przystanek, skrzyzowania, wariant, punkty):\n for skrzyzowanie in skrzyzowania:\n if przystanek in punkty[skrzyzowanie]['between'] and wariant[1][wariant[1].index(przystanek) + 1] in punkty[skrzyzowanie]['between']:\n return skrzyzowanie\n return None\n\n\ndef generate_graph():\n config = Config()\n dbapi = DbApi()\n #test = Przystanki()\n linie = [str(linia) for linia in config['lines']]\n\n #logging.info(test.petle)\n\n dokladne_linie = {klucz: [] for klucz in linie}\n for linia in linie:\n warianty = dbapi.get_variants_for_line(linia)\n for wariant in warianty:\n przystanki = dbapi.get_stops_for_variant(wariant)\n tupla = tuple([wariant, przystanki])\n dokladne_linie[linia].append(tupla)\n\n with open(os.environ['TRAM_ROOT'] + '/data/przystanki_0_159.json', 'r') as plik:\n punkty = json.load(plik)\n\n ogarniete = {klucz: (float(punkty[klucz]['y']) * (10**6), float(punkty[klucz]['x']) * (10**6)) for klucz in punkty}\n petle = {k: v for k, v in ogarniete.items() if punkty[k]['petla'] is True}\n skrzyzowania = {k: v for k, v in ogarniete.items() if punkty[k]['skrzyzowanie'] is True}\n przystanki = {k: v for k, v in ogarniete.items() if punkty[k]['przystanek'] is True}\n\n G = nx.Graph()\n\n G.add_nodes_from(ogarniete.keys())\n for n, p in ogarniete.items():\n G.node[n]['pos'] = p\n pos = nx.get_node_attributes(G, 'pos')\n\n offset = {}\n for k, v in pos.items():\n offset[k] = (v[0], v[1] - 500)\n\n plt.figure(3, figsize=(80, 80))\n nx.draw_networkx_nodes(G, pos, nodelist=przystanki, node_color='b', node_size=150)\n nx.draw_networkx_nodes(G, pos, nodelist=skrzyzowania, node_color='g', node_size=100)\n nx.draw_networkx_nodes(G, pos, nodelist=petle, node_color='r', node_size=200)\n nx.draw_networkx_labels(G, offset, font_size=12, font_family=('ubuntu', 'arial'))\n\n edges = {}\n for linia in linie:\n for wariant in dokladne_linie[linia]:\n for przystanek in wariant[1][:-1]:\n ze_skrzyzowaniem = czy_skrzyzowanie(przystanek, skrzyzowania, wariant, punkty)\n if ze_skrzyzowaniem is not None:\n kraw1 = tuple([przystanek, ze_skrzyzowaniem])\n if kraw1 in edges:\n edges[kraw1].append(linia)\n else:\n edges[kraw1] = [linia]\n else:\n kraw = tuple([przystanek, wariant[1][wariant[1].index(przystanek) + 1]])\n if kraw in edges:\n edges[kraw].append(linia)\n else:\n edges[kraw] = [linia]\n\n for edge, label in edges.items():\n first = (punkty[edge[0]]['x'], punkty[edge[0]]['y'])\n second = (punkty[edge[1]]['x'], punkty[edge[1]]['y'])\n logging.info('%s - %s: %s', edge[0], edge[1], vincenty(first, second).meters)\n G.add_edge(edge[0], edge[1], linie=label, kolejka_L=deque(), kolejka_R=deque(), odleglosc=vincenty(first, second).meters)\n nx.draw_networkx_edges(G, pos)\n # nx.draw_networkx_edge_labels(G, pos)\n\n plt.savefig(os.environ['TRAM_ROOT'] + '/data/graph.png', format='png', dpi=75)\n nx.write_yaml(G, os.environ['TRAM_ROOT'] + '/data/graph.yaml')\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]
embracesource-cv-com/keras-east
[ "0733a9a99c4446a30c8b8e1d62e102391f7a854a" ]
[ "east/utils/image_utils.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n File Name: image\r\n Description : 图像处理工具类\r\n Author : mick.yi\r\n date: 2019/2/18\r\n\"\"\"\r\nimport skimage\r\nfrom skimage import io, transform\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\n\r\ndef load_image(image_path):\r\n \"\"\"\r\n 加载图像\r\n :param image_path: 图像路径\r\n :return: [h,w,3] numpy数组\r\n \"\"\"\r\n image = plt.imread(image_path)\r\n # 灰度图转为RGB\r\n if len(image.shape) == 2:\r\n image = np.expand_dims(image, axis=2)\r\n image = np.tile(image, (1, 1, 3))\r\n elif image.shape[-1] == 1:\r\n image = skimage.color.gray2rgb(image) # io.imread 报ValueError: Input image expected to be RGB, RGBA or gray\r\n # 标准化为0~255之间\r\n if image.dtype == np.float32:\r\n image *= 255\r\n image = image.astype(np.uint8)\r\n # 删除alpha通道\r\n return image[..., :3]\r\n\r\n\r\ndef resize_image_and_gt(image, output_size, gt_polygons=None):\r\n \"\"\"\r\n 按照输入大小缩放图像\r\n :param image:\r\n :param output_size:\r\n :param gt_polygons:\r\n :return:\r\n image: (H,W,3)\r\n image_meta: 元数据信息,详见compose_image_meta\r\n gt_boxes:图像缩放及padding后对于的GT 边框坐标 [N,(y1,x1,y2,x2)]\r\n \"\"\"\r\n original_shape = image.shape\r\n # resize图像,并获取相关元数据信息\r\n h, w, window, scale, padding = resize_meta(original_shape[0], original_shape[1], output_size)\r\n image = resize_image(image, h, w, padding)\r\n\r\n # 组合元数据信息\r\n image_meta = compose_image_meta(np.random.randint(10000), original_shape, image.shape,\r\n window, scale)\r\n # 根据缩放及padding调整GT边框\r\n if gt_polygons is not None and gt_polygons.shape[0] > 0:\r\n gt_polygons = adjust_polygons(gt_polygons, padding, scale)\r\n\r\n return image, image_meta, gt_polygons\r\n\r\n\r\ndef random_crop_image(image, gt_window):\r\n \"\"\"\r\n 随机裁剪图像\r\n :param image: [H,W,C]\r\n :param gt_window: 标注区域 (y1,x1,y2,x2)\r\n :return: 裁剪后的图像和裁剪窗口\r\n \"\"\"\r\n h, w = list(image.shape)[:2]\r\n y1, x1, y2, x2 = gt_window\r\n # 每边最多裁剪1/10\r\n crop_ratio = 0.1\r\n wy1 = np.random.randint(min(y1 + 1, h * crop_ratio))\r\n wx1 = np.random.randint(min(x1 + 1, w * crop_ratio))\r\n wy2 = h - np.random.randint(min(h - y2 + 1, h * crop_ratio))\r\n wx2 = w - np.random.randint(min(w - x2 + 1, w * crop_ratio))\r\n return image[wy1:wy2, wx1:wx2], [wy1, wx1, wy2, wx2]\r\n\r\n\r\ndef resize_image(image, h, w, padding):\r\n \"\"\"\r\n 缩放图像为正方形,指定长边大小,短边padding;\r\n :param image: numpy 数组(H,W,3)\r\n :param h: 缩放后的高度\r\n :param w: 缩放后的宽度\r\n :param padding:缩放后增加的padding\r\n :return: 缩放后的图像,元素图像的宽口位置,缩放尺寸,padding\r\n \"\"\"\r\n image_dtype = image.dtype\r\n image = transform.resize(image, (h, w), order=1, mode='constant',\r\n cval=0, clip=True, preserve_range=True)\r\n\r\n image = np.pad(image, padding, mode='constant', constant_values=0)\r\n return image.astype(image_dtype)\r\n\r\n\r\ndef resize_meta(h, w, max_dim):\r\n \"\"\"\r\n 计算resize的元数据信息\r\n :param h: 图像原始高度\r\n :param w: 图像原始宽度\r\n :param max_dim: 缩放后的边长\r\n :return:\r\n \"\"\"\r\n scale = max_dim / max(h, w) # 缩放尺寸\r\n # 新的高度和宽度\r\n h, w = round(h * scale), round(w * scale)\r\n\r\n # 计算padding\r\n top_pad = (max_dim - h) // 2\r\n bottom_pad = max_dim - h - top_pad\r\n left_pad = (max_dim - w) // 2\r\n right_pad = max_dim - w - left_pad\r\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\r\n # 计算窗口\r\n window = (top_pad, left_pad, h + top_pad, w + left_pad) #\r\n return h, w, window, scale, padding\r\n\r\n\r\ndef compose_image_meta(image_id, original_image_shape, image_shape,\r\n window, scale):\r\n \"\"\"\r\n 组合图像元数据信息,返回numpy数据\r\n :param image_id:\r\n :param 
original_image_shape: 原始图像形状,tuple(H,W,3)\r\n :param image_shape: 缩放后图像形状tuple(H,W,3)\r\n :param window: 原始图像在缩放图像上的窗口位置(y1,x1,y2,x2)\r\n :param scale: 缩放因子\r\n :return:\r\n \"\"\"\r\n meta = np.array(\r\n [image_id] + # size=1\r\n list(original_image_shape) + # size=3\r\n list(image_shape) + # size=3\r\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\r\n [scale] # size=1\r\n )\r\n return meta\r\n\r\n\r\ndef parse_image_meta(meta):\r\n \"\"\"\r\n 解析图像元数据信息,注意输入是元数据信息数组\r\n :param meta: [12]\r\n :return:\r\n \"\"\"\r\n image_id = meta[0]\r\n original_image_shape = meta[1:4]\r\n image_shape = meta[4:7]\r\n window = meta[7:11] # (y1, x1, y2, x2) window of image in in pixels\r\n scale = meta[11]\r\n return {\r\n \"image_id\": image_id.astype(np.int32),\r\n \"original_image_shape\": original_image_shape.astype(np.int32),\r\n \"image_shape\": image_shape.astype(np.int32),\r\n \"window\": window.astype(np.int32),\r\n \"scale\": scale.astype(np.float32)\r\n }\r\n\r\n\r\ndef batch_parse_image_meta(meta):\r\n \"\"\"\r\n 解析图像元数据信息,注意输入是元数据信息数组\r\n :param meta: [batch,12]\r\n :return:\r\n \"\"\"\r\n image_id = meta[:, 0]\r\n original_image_shape = meta[:, 1:4]\r\n image_shape = meta[:, 4:7]\r\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\r\n scale = meta[:, 11]\r\n return {\r\n \"image_id\": image_id.astype(np.int32),\r\n \"original_image_shape\": original_image_shape.astype(np.int32),\r\n \"image_shape\": image_shape.astype(np.int32),\r\n \"window\": window.astype(np.int32),\r\n \"scale\": scale.astype(np.float32)\r\n }\r\n\r\n\r\ndef adjust_box(boxes, padding, scale):\r\n \"\"\"\r\n 根据填充和缩放因子,调整boxes的值\r\n :param boxes: numpy 数组; GT boxes [N,(y1,x1,y2,x2)]\r\n :param padding: [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\r\n :param scale: 缩放因子\r\n :return:\r\n \"\"\"\r\n boxes = boxes * scale\r\n boxes[:, 0::2] += padding[0][0] # 高度padding\r\n boxes[:, 1::2] += padding[1][0] # 宽度padding\r\n return boxes\r\n\r\n\r\ndef adjust_polygons(polygons, padding, scale):\r\n \"\"\"\r\n 根据填充和缩放因子,调整四边形的值\r\n :param polygons: numpy 数组; GT polygons[N,4,(x,y)]\r\n :param padding: [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\r\n :param scale: 缩放因子\r\n :return:\r\n \"\"\"\r\n polygons = polygons * scale\r\n polygons[:, :, 1] += padding[0][0] # 高度padding\r\n polygons[:, :, 0] += padding[1][0] # 宽度padding\r\n return polygons\r\n\r\n\r\ndef recover_detect_boxes(boxes, window, scale):\r\n \"\"\"\r\n 将检测边框映射到原始图像上,去除padding和缩放\r\n :param boxes: numpy数组,[n,(y1,x1,y2,x2)]\r\n :param window: [(y1,x1,y2,x2)]\r\n :param scale: 标量\r\n :return:\r\n \"\"\"\r\n # 去除padding\r\n boxes[:, 0::2] -= window[0]\r\n boxes[:, 1::2] -= window[1]\r\n # 还原缩放\r\n boxes /= scale\r\n return boxes\r\n\r\n\r\ndef clip_polygons(polygons, window):\r\n \"\"\"\r\n 将检测四边形映射到原始图像上,去除padding和缩放\r\n :param polygons: numpy数组,[n,4,(x,y)]\r\n :param window: [(y1,x1,y2,x2)]\r\n :return:\r\n \"\"\"\r\n if len(polygons) == 0:\r\n return polygons\r\n y1, x1, y2, x2 = window\r\n # 保证不越界\r\n polygons[:, :, 1] = np.maximum(y1, np.minimum(y2, polygons[:, :, 1]))\r\n polygons[:, :, 0] = np.maximum(x1, np.minimum(x2, polygons[:, :, 0]))\r\n return polygons\r\n\r\n\r\ndef recover_detect_polygons(polygons, window, scale):\r\n \"\"\"\r\n 将检测四边形映射到原始图像上,去除padding和缩放\r\n :param polygons: numpy数组,[n,4,(x,y)]\r\n :param window: [(y1,x1,y2,x2)]\r\n :param scale: 标量\r\n :return:\r\n \"\"\"\r\n if len(polygons) == 0:\r\n return polygons\r\n clip_polygons(polygons, window)\r\n # 去除padding\r\n polygons[:, :, 1] -= 
window[0] # 高度\r\n polygons[:, :, 0] -= window[1] # 宽度\r\n # 还原缩放\r\n polygons /= scale\r\n return polygons\r\n" ]
[ [ "numpy.expand_dims", "numpy.minimum", "numpy.pad", "matplotlib.pyplot.imread", "numpy.tile", "numpy.random.randint" ] ]
santosh-b/Alleviate-Robust-Overfitting
[ "c369ab2eaf51ba02a15f45db77a8c9292c8dbbf8" ]
[ "Synaptic-Flow/Utils/metrics.py" ]
[ "import torch\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nfrom prune import * \nfrom Layers import layers\n\ndef summary(model, scores, flops, prunable):\n r\"\"\"Summary of compression results for a model.\n \"\"\"\n rows = []\n for name, module in model.named_modules():\n for pname, param in module.named_parameters(recurse=False):\n pruned = prunable(module) and id(param) in scores.keys()\n if pruned:\n sparsity = getattr(module, pname+'_mask').detach().cpu().numpy().mean()\n score = scores[id(param)].detach().cpu().numpy()\n else:\n sparsity = 1.0\n score = np.zeros(1)\n shape = param.detach().cpu().numpy().shape\n flop = flops[name][pname]\n score_mean = score.mean()\n score_var = score.var()\n score_sum = score.sum()\n score_abs_mean = np.abs(score).mean()\n score_abs_var = np.abs(score).var()\n score_abs_sum = np.abs(score).sum()\n rows.append([name, pname, sparsity, np.prod(shape), shape, flop,\n score_mean, score_var, score_sum, \n score_abs_mean, score_abs_var, score_abs_sum, \n pruned])\n\n columns = ['module', 'param', 'sparsity', 'size', 'shape', 'flops', 'score mean', 'score variance', \n 'score sum', 'score abs mean', 'score abs variance', 'score abs sum', 'prunable']\n return pd.DataFrame(rows, columns=columns)\n\ndef flop(model, input_shape, device):\n\n total = {}\n def count_flops(name):\n def hook(module, input, output):\n flops = {}\n if isinstance(module, layers.Linear) or isinstance(module, nn.Linear):\n in_features = module.in_features\n out_features = module.out_features\n flops['weight'] = in_features * out_features\n if module.bias is not None:\n flops['bias'] = out_features\n if isinstance(module, layers.Conv2d) or isinstance(module, nn.Conv2d):\n in_channels = module.in_channels\n out_channels = module.out_channels\n kernel_size = int(np.prod(module.kernel_size))\n output_size = output.size(2) * output.size(3)\n flops['weight'] = in_channels * out_channels * kernel_size * output_size\n if module.bias is not None:\n flops['bias'] = out_channels * output_size\n if isinstance(module, layers.BatchNorm1d) or isinstance(module, nn.BatchNorm1d):\n if module.affine:\n flops['weight'] = module.num_features\n flops['bias'] = module.num_features\n if isinstance(module, layers.BatchNorm2d) or isinstance(module, nn.BatchNorm2d):\n output_size = output.size(2) * output.size(3)\n if module.affine:\n flops['weight'] = module.num_features * output_size\n flops['bias'] = module.num_features * output_size\n if isinstance(module, layers.Identity1d):\n flops['weight'] = module.num_features\n if isinstance(module, layers.Identity2d):\n output_size = output.size(2) * output.size(3)\n flops['weight'] = module.num_features * output_size\n total[name] = flops\n return hook\n \n for name, module in model.named_modules():\n module.register_forward_hook(count_flops(name))\n\n input = torch.ones([1] + list(input_shape)).to(device)\n model(input)\n\n return total\n\n\n# def conservation(model, scores, batchnorm, residual):\n# r\"\"\"Summary of conservation results for a model.\n# \"\"\"\n# rows = []\n# bias_flux = 0.0\n# mu = 0.0\n# for name, module in reversed(list(model.named_modules())):\n# if prunable(module, batchnorm, residual):\n# weight_flux = 0.0\n# for pname, param in module.named_parameters(recurse=False):\n \n# # Get score\n# score = scores[id(param)].detach().cpu().numpy()\n \n# # Adjust batchnorm bias score for mean and variance\n# if isinstance(module, (layers.Linear, layers.Conv2d)) and pname == \"bias\":\n# bias = param.detach().cpu().numpy()\n# score 
*= (bias - mu) / bias\n# mu = 0.0\n# if isinstance(module, (layers.BatchNorm1d, layers.BatchNorm2d)) and pname == \"bias\":\n# mu = module.running_mean.detach().cpu().numpy()\n \n# # Add flux\n# if pname == \"weight\":\n# weight_flux += score.sum()\n# if pname == \"bias\":\n# bias_flux += score.sum()\n# layer_flux = weight_flux\n# if not isinstance(module, (layers.Identity1d, layers.Identity2d)):\n# layer_flux += bias_flux\n# rows.append([name, layer_flux])\n# columns = ['module', 'score flux']\n\n# return pd.DataFrame(rows, columns=columns)\n\n" ]
[ [ "numpy.prod", "numpy.zeros", "numpy.abs", "pandas.DataFrame" ] ]
kzm4269/keras-yolo3
[ "06b2b522213cb901f4a7133b87aab04079e41aff" ]
[ "test_tflite.py" ]
[ "import argparse\nimport sys\nfrom pathlib import Path\n\nimport numpy as np\nimport tensorflow as tf\nimport keras\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom yolo3.model import yolo_eval\nfrom yolo3.utils import letterbox_image\n\n\ndef predict_keras(model_path):\n model = keras.models.load_model(model_path, compile=False)\n \n def predict(image):\n assert image.ndim == 3, image.shape\n assert image.dtype == np.float32, image.dtype\n assert image.ptp() <= 1.0, image.ptp()\n return model.predict([image[None]])\n \n return predict\n\n\ndef predict_tflite(model_path):\n # Load TFLite model and allocate tensors.\n interpreter = tf.lite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n\n # Get input and output tensors.\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n \n def predict(image):\n assert image.ndim == 3, image.shape\n assert image.dtype == np.float32, image.dtype\n assert image.ptp() <= 1.0, image.ptp()\n \n # Test model on random input data.\n print('- predict_tflite: interpreter.set_tensor')\n interpreter.set_tensor(input_details[0]['index'], image[None])\n \n print('- predict_tflite: interpreter.invoke')\n interpreter.invoke()\n \n # The function `get_tensor()` returns a copy of the tensor data.\n # Use `tensor()` in order to get a pointer to the tensor.\n print('- predict_tflite: interpreter.get_tensor')\n return [interpreter.get_tensor(output_ditail['index']) for output_ditail in output_details]\n \n return predict\n \n \ndef _main():\n parser = argparse.ArgumentParser()\n parser.add_argument('model', help='model path (.h5 or .tflite)')\n parser.add_argument('images', nargs='+', help='image paths')\n args = parser.parse_args()\n \n anchors = np.reshape(list(map(int, Path('./model_data/yolo_anchors.txt').read_text().strip().split(','))), (-1, 2))\n class_names = Path('./model_data/coco_classes.txt').read_text().strip().splitlines()\n\n predict = {\n 'h5': predict_keras,\n 'tflite': predict_tflite,\n }[args.model.split('.')[-1]](args.model)\n \n for i, image_path in enumerate(map(Path, args.images)):\n print('load:', image_path)\n pil_image = Image.open(str(image_path))\n input_data = letterbox_image(pil_image, size=(416, 416))\n input_data = input_data / np.float32(255.)\n image = np.asarray(pil_image)\n # image = input_data.copy()\n \n print('predict:', image_path)\n output_data = predict(input_data)\n \n print('eval:', image_path)\n result = yolo_eval(\n [keras.backend.constant(d) for d in output_data],\n anchors=anchors, \n num_classes=len(class_names),\n image_shape=(image.shape[0], image.shape[1]),\n score_threshold=0.3,\n iou_threshold=0.45,\n )\n boxes, scores, classes = [keras.backend.eval(t) for t in result]\n print('boxes =', boxes)\n \n print('save:', image_path)\n from matplotlib.backends.backend_agg import FigureCanvasAgg\n fig = FigureCanvasAgg(plt.Figure()).figure\n ax = fig.add_subplot(1,1,1)\n ax.imshow(image)\n for i, (top, left, bottom, right) in enumerate(boxes):\n assert top <= bottom and left <= right\n ax.add_patch(plt.Rectangle(xy=[left, top], width=right - left, height=bottom - top, fill=False, linewidth=3, color='red'))\n fig.savefig(f'out_{args.model.split(\".\")[-1]}_{i:03d}.png')\n \n \nif __name__ == '__main__':\n _main()" ]
[ [ "matplotlib.pyplot.Rectangle", "matplotlib.pyplot.Figure", "numpy.asarray", "tensorflow.lite.Interpreter", "numpy.float32" ] ]
foamliu/Image-Matching
[ "3213a8a574fa7bcc476d3de1c7370c268bf817a7" ]
[ "demo.py" ]
[ "import math\n\nimport cv2 as cv\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom models import ResNetMatchModel\n\n\ndef get_image(file):\n img = cv.imread(file)\n img = img[..., ::-1] # RGB\n img = Image.fromarray(img, 'RGB') # RGB\n img = transformer(img)\n img = img.to(device)\n return img\n\n\ndef get_feature(model, file):\n img = get_image(file)\n imgs = img.unsqueeze(dim=0)\n with torch.no_grad():\n output = model(imgs)\n feature = output[0].cpu().numpy()\n return feature / np.linalg.norm(feature)\n\n\nif __name__ == \"__main__\":\n device = torch.device('cpu')\n threshold = 21.07971786746929\n\n filename = 'image_matching.pt'\n model = ResNetMatchModel()\n model.load_state_dict(torch.load(filename))\n model = model.to(device)\n model.eval()\n\n transformer = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n x0 = get_feature(model, '0.jpg')\n x1 = get_feature(model, '6.jpg')\n\n cosine = np.dot(x0, x1)\n cosine = np.clip(cosine, -1, 1)\n theta = math.acos(cosine)\n theta = theta * 180 / math.pi\n\n print(theta)\n print(theta <= threshold)\n" ]
[ [ "numpy.dot", "numpy.clip", "torch.load", "numpy.linalg.norm", "torch.no_grad", "torch.device" ] ]
ucbrise/snoopy
[ "da4c98e3876c10cf52aa51ece3b62c5e8b8e335a" ]
[ "scripts/fig/util.py" ]
[ "import json\nimport math\nimport random\nfrom collections import defaultdict\nfrom scipy.special import lambertw\n\ndef parseData(filename):\n results = []\n f = open(filename, \"r\")\n for line in f:\n elems = line.split()\n result = {\n \"clients\": int(elems[0]),\n \"data_size\": int(elems[1]),\n \"suborams\": int(elems[2]),\n \"iter\": int(elems[3]),\n \"balancers\": int(elems[4]),\n \"mean_latency\": float(elems[6]),\n \"min_latenecy\": float(elems[7]),\n \"max_latency\": float(elems[8]),\n \"var_latency\": float(elems[9]),\n \"std_latency\": float(elems[10]),\n \"50_latency\": float(elems[11]),\n \"75_latency\": float(elems[12]),\n \"90_latency\": float(elems[13]),\n \"95_latency\": float(elems[14]),\n \"99_latency\": float(elems[15]),\n \"throughput\": float(elems[16])\n }\n results.append(result)\n return results\n\ndef parseDataNew(filename):\n results = []\n f = open(filename, \"r\")\n for line in f:\n elems = line.split()\n result = {\n \"clients\": int(elems[0]),\n \"data_size\": int(elems[1]),\n \"suborams\": int(elems[2]),\n \"balancers\": int(elems[3]),\n \"iter\": int(elems[4]),\n \"mean_latency\": float(elems[5]),\n \"min_latenecy\": float(elems[6]),\n \"max_latency\": float(elems[7]),\n \"var_latency\": float(elems[8]),\n \"std_latency\": float(elems[9]),\n \"50_latency\": float(elems[10]),\n \"75_latency\": float(elems[11]),\n \"90_latency\": float(elems[12]),\n \"95_latency\": float(elems[13]),\n \"99_latency\": float(elems[14]),\n \"throughput\": float(elems[15])\n }\n results.append(result)\n return results\n\ndef parseDataNew2(filename):\n results = []\n f = open(filename, \"r\")\n for line in f:\n elems = line.split()\n result = {\n \"clients\": int(elems[0]),\n \"data_size\": int(elems[1]),\n \"suborams\": int(elems[2]),\n \"balancers\": int(elems[3]),\n \"epoch_ms\": int(elems[4]),\n \"iter\": int(elems[5]),\n \"mean_latency\": float(elems[6]),\n \"min_latenecy\": float(elems[7]),\n \"max_latency\": float(elems[8]),\n \"var_latency\": float(elems[9]),\n \"std_latency\": float(elems[10]),\n \"50_latency\": float(elems[11]),\n \"75_latency\": float(elems[12]),\n \"90_latency\": float(elems[13]),\n \"95_latency\": float(elems[14]),\n \"99_latency\": float(elems[15]),\n \"throughput\": float(elems[16])\n }\n results.append(result)\n return results\n\ndef getMaxThroughputForNumBalancers(results, num_balancers):\n ret = 0\n for result in results:\n if result[\"balancers\"] == num_balancers:\n ret = max(ret, result[\"throughput\"])\n return ret\n\ndef getMaxThroughputForNumBalancersWithMaxLatency(results, num_balancers, max_latency, suborams=None):\n ret = 0\n for result in results:\n if result[\"balancers\"] == num_balancers and result[\"90_latency\"] <= max_latency:\n if suborams is None or result[\"suborams\"] == suborams:\n ret = max(ret, result[\"throughput\"])\n return ret\n\ndef getMaxThroughputForNumBalancersWithMaxMeanLatency(results, num_balancers, max_latency, suborams=None):\n ret = 0\n for result in results:\n if result[\"balancers\"] == num_balancers and result[\"50_latency\"] <= max_latency:\n if suborams is None or result[\"suborams\"] == suborams:\n ret = max(ret, result[\"throughput\"])\n return ret\n\n\n\ndef getLatencyForMaxThroughputForNumBalancers(results, num_balancers):\n throughput = 0\n ret = 0\n for result in results:\n if result[\"balancers\"] == num_balancers:\n if (throughput < result[\"throughput\"]):\n throughput = result[\"throughput\"]\n ret = result[\"mean_latency\"]\n return ret\n\ndef getMaxThroughputForEpochMs(results, 
epoch_ms):\n ret = 0\n for result in results:\n if result[\"epoch_ms\"] == epoch_ms:\n ret = max(ret, result[\"throughput\"])\n return ret\n\n\ndef getMaxDataForNumSuborams(results, num_suborams, max_latency, latency_type):\n ret = 0\n for result in results:\n if result[\"suborams\"] == num_suborams and result[latency_type] < max_latency:\n print((\"Acceptable latency for %d suborams: %d\") % (result[\"suborams\"], result[latency_type]))\n ret = max(ret, result[\"data_size\"])\n return ret\n\ndef getTupleListOfVals(results, *labels):\n ret = []\n for result in results:\n res = ()\n for l in labels:\n res += (result[l],)\n if res not in ret:\n ret.append(res)\n return ret\n\ndef getListOfVals(results, label):\n ret = []\n for result in results:\n if result[label] not in ret:\n ret.append(result[label])\n return ret\n\ndef getLatencyForSuboramAndDataSize(results, num_suborams, data_size, latency_type):\n for result in results:\n if result[\"suborams\"] == num_suborams and result[\"data_size\"] == data_size:\n return result[latency_type]\n\ndef f(N, n_suborams, secparam=128):\n mu = N / n_suborams\n alpha = math.log(n_suborams * (2 ** secparam))\n rhs = alpha / (math.e * mu) - 1 / math.e\n branch = 0 \n epsilon = math.e ** (lambertw(rhs, branch) + 1) - 1 \n #epsilon = (alpha + math.sqrt(2 * mu * alpha)) / mu # uncomment for looser bound\n #print(alpha, rhs, lambertw(rhs, 0), lambertw(rhs, 1))\n #print(\"bound\", suborams, secparam, alpha, rhs, lambertw(rhs), epsilon)\n return mu * (1 + epsilon)\n\ndef hash_requests(reqs, n_suborams, run):\n offset = run * reqs\n secret = b'Sixteen byte key'\n buckets = defaultdict(int)\n for i in range(offset, offset+reqs):\n \"\"\" \n cobj = CMAC.new(secret, ciphermod=AES)\n cobj.update(i.to_bytes(i.bit_length(), 'big'))\n h = int(cobj.hexdigest(), 16)\n \"\"\"\n h = int(random.random() * n_suborams)\n bucket = h % n_suborams\n buckets[bucket] += 1\n return max(buckets.values())\n\ndef max_requests(n_suborams, target, secparam):\n \"\"\" \n Get maximum request batch size for a given # of suborams that each support target requests.\n \"\"\"\n l = n_suborams\n r = 2 ** 32\n m = 0\n while l <= r:\n m = math.floor((l+r)/ 2)\n bound = f(m, n_suborams, secparam)\n if bound > target:\n r = m - 1\n elif bound < target:\n l = m + 1\n else:\n return m\n return m\n\ndef parse_args(parser):\n parser.add_argument('input', type=str, help='input data')\n parser.add_argument('output', type=str, help='output file')\n parser.add_argument('-b', '--baseline', help='baseline data')\n parser.add_argument('-t', '--title', help='set graph title')\n parser.add_argument('-l', '--large', action='store_true',\n help='output large graph (default: false)')\n args = parser.parse_args()\n return args\n\ndef parse_baseline(filename):\n with open(filename, 'r') as f:\n baseline = json.load(f)\n return baseline" ]
[ [ "scipy.special.lambertw" ] ]
idc9/explore
[ "ce8aa039de96b1dd9fecc19fa098c222863ac3ce" ]
[ "explore/viz/continuous.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.stats import pearsonr\n\nfrom explore.utils import safe_apply\nfrom explore.viz.utils import bold, ABLine2D, fmt_pval\n\n\ndef plot_scatter(x, y, alpha=0.05, standardize=False, label=None):\n \"\"\"\n Parameters\n ----------\n x, y: array-like (ideally pd.Series)\n x, y values to plot. If pd.Series, uses 'name' to get x/y labels\n\n alpha: float\n Cutoff for correlation coefficient significance.\n\n standardisze: bool\n Whether or not to standardized (mean center and scale) variables.\n True by defualt.\n\n \"\"\"\n\n xlab, ylab = '', ''\n if hasattr(x, 'name'):\n xlab = x.name\n if hasattr(y, 'name'):\n ylab = y.name\n\n # drop missing values\n df = pd.concat([pd.Series(x), pd.Series(y)], axis=1).dropna()\n\n # optinally center/scale\n if standardize:\n df = safe_apply(StandardScaler().fit_transform, df)\n xlab += ' (standardized)'\n ylab += ' (standardized)'\n\n x = df.iloc[:, 0].values.reshape(-1)\n y = df.iloc[:, 1].values.reshape(-1)\n\n # fit linear model\n lm = LinearRegression(fit_intercept=True).fit(x.reshape(-1, 1), y)\n slope = lm.coef_.item()\n intercept = lm.intercept_\n\n # if no label provided, compute correlation\n if label is None:\n alpha = 0.05\n # compute pearson correlation\n corr, pval = pearsonr(x, y)\n reject = pval < alpha\n label = get_cts_label(reject, corr, corr_name='pearson', pval=pval)\n\n # scatter plot\n plt.scatter(x, y, color='blue', s=2)\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n\n # line\n ABLine2D(slope, intercept, label=label,\n color='blue') # , linewidth=linewidth\n plt.legend(loc='upper left')\n\n\ndef get_cts_label(reject, corr, corr_name, pval):\n\n if reject:\n # stat_str = bold('pearson \\\\ corr: {:1.2f} \\\\ (p={:1.2f})'.format(corr, pval))\n # label = bold('{}: {:1.3f} (p={:1.3f})*'.format(corr_name, corr, pval))\n # label = bold('{}: {:1.3f} (p={:.1e})*'.format(corr_name, corr, pval))\n label = bold('{}: {:1.3f} (p={})*'.format(corr_name, corr,\n fmt_pval(pval)))\n else:\n # stat_str = 'pearson corr: {:1.2f} (p={:1.2f})'.format(corr, pval)\n # label = '{}: {:1.3f} (p={:1.3f})'.format(corr_name, corr, pval)\n label = '{}: {:1.3f} (p={})'.format(corr_name, corr,\n fmt_pval(pval))\n\n return label\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.Series", "matplotlib.pyplot.scatter", "scipy.stats.pearsonr", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.ylabel" ] ]
timkphd/examples
[ "04c162ec890a1c9ba83498b275fbdc81a4704062" ]
[ "mpi/mpi4py/simple.py" ]
[ "#!/usr/bin/env python3\nfrom mpi4py import MPI\nimport numpy\nglobal numnodes,myid,mpi_err\nglobal mpi_root\nimport sys\nmpi_root=0\n# This is a bag-of-tasks program. We define a manager task\n# that distributes work to workers. Actually, the workers\n# request input data. The manager sits in a loop calling\n# Iprobe waiting for requests for work. \n\n# In this case the manager reads input. The input is a list\n# of file names. It will send a entry from the list as \n# requested. When the worker is done processing it will\n# request a new file name from the manager. This continues\n# until the manager runs out of files to process. The \n# manager subroutine is just \"manager\"\n\n# The worker subroutine is \"worker\". It receives file names \n# form the manager.\n#\n# The files in this case are outputs from an optics program\n# tracking a laser beam as it propagates through the atmosphere.\n# The workers read in the data and then create an image of the\n# data by calling the routine mkview.plotit. This should worker\n# with arbitrary 2d files except the size in mkview.plotit is\n# currently hard coded to 64 x 64. \n\n# We use the call to \"Split\" to create a seperate communicator\n# for the workers. This is not important in this example but\n# could be if you wanted multiple workers to work together. \n\n# To get the data...\n\n# curl http://hpc.mines.edu/examples/laser.tgz | tar -xz\n\ndef worker(THE_COMM_WORLD,managerid):\n\timport mkview\n\tx=0\n\tcomm=MPI.COMM_WORLD\n\tsend_msg = numpy.arange(1, dtype='i')\n\trecv_msg = numpy.zeros_like(send_msg)\n\tic=0\n\twhile(True) :\n# send message says I am ready for data #\n\t\tsend_msg[0]=x\n\t\tcomm.Send([send_msg, MPI.INT], dest=managerid, tag=1234)\n# get a message from the manager #\n\t\tbuffer=numpy.array((1), dtype=str)\n\t\t#buffer=numpy.asarray(\"000000000000000\",dtype=str)\n\t\tbuffer=numpy.asarray(\" \",dtype=str)\n\t\tcomm.Recv([buffer,MPI.CHAR], source=managerid, tag=2345)\n#\t\tprint(buffer)\n\t\tx=str(buffer).split()\n\t\tfname=x[0]\n\t\tx=int(x[1])\n\t\tif(x < 0):\n\t\t\treturn ic\n\t\tprint(THE_COMM_WORLD.Get_rank(),fname,x)\n\t\tic=ic+1\n\t\tmkview.plotit(fname,x)\n#\ndef manager(num_used,TODO):\n\tglobal numnodes,myid,mpi_err\n\tglobal mpi_root\n\tcomm=MPI.COMM_WORLD\n\tsend_msg = numpy.arange(1, dtype='i')\n\trecv_msg = numpy.zeros_like(send_msg)\n\tstatus = MPI.Status()\n# our \"data\"\n# Our worker is expecting a single word followed by a manager appended integer\n\tdata=sys.stdin.readlines()\n\ttodo=len(data)\n# counters\n\tigot=0 \n\tisent=0\n\twhile(isent < todo):\n# wait for a request for work #\n\t\tflag=comm.Iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG,status=status)\n\t\tif(flag):\n# where is it comming from #\n\t\t\tgotfrom=status.source \n\t\t\tsendto=gotfrom\n\t\t\tcomm.Recv([recv_msg, MPI.INT], source=gotfrom, tag=1234)\n\t\t\tx=recv_msg[0]\n\t\t\tprint(\"worker %d sent %d\" % (gotfrom,x))\n\t\t\tif(x > -1):\n\t\t\t\tigot=igot+1\n\t\t\t\tprint(\"igot \"+str(igot))\n\t\t\tif(isent < TODO):\n# send real data #\n\t\t\t\td=data[isent]\n\t\t\t\td=d.strip()\n\t\t\t\tsend_msg=numpy.array([d+\" \"+str(isent)], dtype=str)\n\t\t\t\tcomm.Send([send_msg, MPI.CHAR], dest=sendto, tag=2345)\n\t\t\t\tisent=isent+1\n# tell everyone to quit #\n\tfor i in range(1,num_used+1):\n\t\tsend_msg=numpy.array([\"stop -1000\"], dtype=str)\n\t\tcomm.Send([send_msg, MPI.CHAR], dest=i, tag=2345)\n\treturn None\n#\n#\nif __name__ == '__main__':\n# do init\n\tglobal 
numnodes,myid,mpi_err\n\tcomm=MPI.COMM_WORLD\n\tmyid=comm.Get_rank()\n\tnumnodes=comm.Get_size()\n\tname = MPI.Get_processor_name()\n\tprint(\"hello from %d of %d on %s\" % (myid,numnodes,name))\n# num_used is the # of processors that are part of the new communicator #\n# for this case hardwire to not include 1 processor #\n\tnum_used=numnodes-1\n\tmannum=0;\n\tMPI_COMM_WORLD=MPI.COMM_WORLD\n\tif(myid == mannum):\n\t\tgroup=0\n\telse:\n\t\tgroup=1\n# Split will create a set of communicators. All of the\n# tasks with the same value of group will be in the same\n# communicator. In this case we get two sets one for the \n# manager and one for the workers. The manager's version \n# of the communicator is not used. \n\n\tDEFINED_COMM=MPI_COMM_WORLD.Split(group,myid)\n#\n\tnew_id=DEFINED_COMM.Get_rank()\n\tworker_size=DEFINED_COMM.Get_size()\n\tprint(\"old id = %d new id = %d worker size = %d\" %(myid,new_id,worker_size))\n#\n\tif(group == 0):\n\t\ttodo=1000\n# if not part of the new group do management. #\n\t\tmanager(num_used,todo)\n\t\tprint(\"manager finished\")\n\t\t#mpi_err = MPI_Barrier(MPI_COMM_WORLD)\n\t\tMPI_COMM_WORLD.barrier()\n\t\tMPI.Finalize()\n\telse:\n# part of the new group do work. #\n\t\tmannum=0;\n\t\tts=MPI.Wtime()\n\t\tidid=worker(DEFINED_COMM,mannum)\n\t\tte=MPI.Wtime()\n\t\tprint(\"worker (%d,%d) finished did %d tasks in %8.2f seconds\" %(myid,new_id,idid,te-ts))\n\t\tMPI_COMM_WORLD.barrier()\n\t\tMPI.Finalize()\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.array", "numpy.zeros_like" ] ]
Prettyfinger/Twostream_reID
[ "8e340e0c03bd248b04ff1b48398ca99b6aeaa508", "8e340e0c03bd248b04ff1b48398ca99b6aeaa508" ]
[ "evaluate.py", "score_rank.py" ]
[ "import scipy.io\nimport torch\nimport numpy as np\n#import time\nimport os\n\n#######################################################################\n# Evaluate\ndef evaluate(qf,ql,qc,gf,gl,gc):\n query = qf\n score = np.dot(gf,query)\n # predict index\n index = np.argsort(score) #from small to large\n index = index[::-1] #19732\n #index = index[0:2000]\n # good index\n query_index = np.argwhere(gl==ql) #59\n camera_index = np.argwhere(gc==qc)#3156\n\n good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)#59-8=51 the same peron in different cameras\n junk_index1 = np.argwhere(gl==-1)#3819:part body\n junk_index2 = np.intersect1d(query_index, camera_index) # 8 the same person in the same camera\n junk_index = np.append(junk_index2, junk_index1) #.flatten()) #3827=3819+8\n \n CMC_tmp = compute_mAP(index, good_index, junk_index)\n return CMC_tmp\n\n\ndef compute_mAP(index, good_index, junk_index):\n ap = 0\n cmc = torch.IntTensor(len(index)).zero_()\n if good_index.size==0: # if empty\n cmc[0] = -1\n return ap,cmc\n\n # remove junk_index\n mask = np.in1d(index, junk_index, invert=True) #19732\n index = index[mask] #15950=19732-8-3819 index remove (the same person in same camera) and (label=-1)\n\n # find good_index index\n ngood = len(good_index)\n mask = np.in1d(index, good_index)\n rows_good = np.argwhere(mask==True)\n rows_good = rows_good.flatten()\n \n cmc[rows_good[0]:] = 1\n for i in range(ngood):\n d_recall = 1.0/ngood\n precision = (i+1)*1.0/(rows_good[i]+1)\n if rows_good[i]!=0:\n old_precision = i*1.0/rows_good[i]\n else:\n old_precision=1.0\n ap = ap + d_recall*(old_precision + precision)/2\n\n return ap, cmc\n\n######################################################################\nresult = scipy.io.loadmat('twostream_Market1501_SeSC.mat')\nquery_feature = result['query_f']\nquery_cam = result['query_cam'][0]\nquery_label = result['query_label'][0]\ngallery_feature = result['gallery_f']\ngallery_cam = result['gallery_cam'][0]\ngallery_label = result['gallery_label'][0]\n\nmulti = os.path.isfile('multi_query.mat')\n\nif multi:\n m_result = scipy.io.loadmat('multi_query.mat')\n mquery_feature = m_result['mquery_f']\n mquery_cam = m_result['mquery_cam'][0]\n mquery_label = m_result['mquery_label'][0]\n \nCMC = torch.IntTensor(len(gallery_label)).zero_()\nap = 0.0\n#print(query_label)\nfor i in range(len(query_label)):\n ap_tmp, CMC_tmp = evaluate(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)\n if CMC_tmp[0]==-1:\n continue\n CMC = CMC + CMC_tmp\n ap += ap_tmp\n # print(i, CMC_tmp[0])\n\nCMC = CMC.float()\nCMC = CMC/len(query_label) #average CMC\nprint('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))\n\n# multiple-query\nCMC = torch.IntTensor(len(gallery_label)).zero_()\nap = 0.0\nif multi:\n for i in range(len(query_label)):\n mquery_index1 = np.argwhere(mquery_label==query_label[i])\n mquery_index2 = np.argwhere(mquery_cam==query_cam[i])\n mquery_index = np.intersect1d(mquery_index1, mquery_index2)\n mq = np.mean(mquery_feature[mquery_index,:], axis=0)\n ap_tmp, CMC_tmp = evaluate(mq,query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)\n if CMC_tmp[0]==-1:\n continue\n CMC = CMC + CMC_tmp\n ap += ap_tmp\n #print(i, CMC_tmp[0])\n CMC = CMC.float()\n CMC = CMC/len(query_label) #average CMC\n print('multi Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))\n", "import scipy.io\nimport torch\nimport numpy as np\n# import time\nimport 
os\n\n\n#######################################################################\n# Evaluate\ndef evaluate(qf, ql, qc, gf, gl, gc):\n query = qf\n score = np.dot(gf, query)\n score0=list(score)\n # score_order = score0.sort(reverse=True)\n\n # print(score)\n # predict index\n index = np.argsort(score) #score:from small to large\n index = index[::-1] # score:from large to small##########***********\n # print(index)\n # index = index[0:2000]\n # good index\n query_index = np.argwhere(gl == ql)\n camera_index = np.argwhere(gc == qc)\n\n good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)# gl == ql & gc !== qc\n junk_index1 = np.argwhere(gl == -1)\n junk_index2 = np.intersect1d(query_index, camera_index) # gl ==ql & gc == qc\n junk_index = np.append(junk_index2, junk_index1) # .flatten())\n\n ap, cmc = compute_mAP(index, good_index, junk_index)\n return ap, cmc, index, score0\n\n\ndef compute_mAP(index, good_index, junk_index):\n ap = 0\n cmc = torch.IntTensor(len(index)).zero_()\n if good_index.size == 0: # if empty\n cmc[0] = -1\n return ap, cmc\n\n # remove junk_index\n mask = np.in1d(index, junk_index, invert=True)\n index = index[mask]\n\n # find good_index index\n ngood = len(good_index)\n mask = np.in1d(index, good_index)\n rows_good = np.argwhere(mask == True)\n rows_good = rows_good.flatten()\n\n cmc[rows_good[0]:] = 1\n for i in range(ngood):\n d_recall = 1.0 / ngood\n precision = (i + 1) * 1.0 / (rows_good[i] + 1)\n if rows_good[i] != 0:\n old_precision = i * 1.0 / rows_good[i]\n else:\n old_precision = 1.0\n ap = ap + d_recall * (old_precision + precision) / 2\n\n return ap, cmc\n\n\n######################################################################\nresult = scipy.io.loadmat('mat/twostream_Resnet50_CUHK99_SSN.mat')\nquery_feature = result['query_f']\nquery_cam = result['query_cam'][0]\nquery_label = result['query_label'][0]\ngallery_feature = result['gallery_f']\ngallery_cam = result['gallery_cam'][0]\ngallery_label = result['gallery_label'][0]\n\nmulti = os.path.isfile('multi_query.mat')\n\nif multi:\n m_result = scipy.io.loadmat('multi_query.mat')\n mquery_feature = m_result['mquery_f']\n mquery_cam = m_result['mquery_cam'][0]\n mquery_label = m_result['mquery_label'][0]\n\nCMC = torch.IntTensor(len(gallery_label)).zero_()\nap = 0.0\n# print(query_label)\nf1 = open('txt/twostream_Resnet50_CUHK99_SSN_queryscore.txt', 'w')\n# f1.write(\"query_ID\")\n# f1.write(' ') # 添加一个空格# # f1.write(\"label\") # 将图片的路径,写入文件\n# # f1.write(' ') # 添加一个空格\n# f1.write(\"score\") # 将图片的路径,写入文件\n# f1.write('\\n') # 转行字符写入文件 换行\nfor i in range(len(query_label)):\n ap_tmp, CMC_tmp, index_order, score_order = evaluate(query_feature[i], query_label[i], query_cam[i], gallery_feature, gallery_label,\n gallery_cam)\n indexorder=index_order[:20]\n scoreorder=sorted(score_order, reverse=True)[:20]\n qelabel= query_label[i]\n qecam= query_cam[i]\n\n f1.write(str(i)) # 将图片的路径,写入文件\n f1.write(': label ') # 添加一个空格\n f1.write(str(qelabel))\n f1.write(' camera ')\n f1.write(str(qecam))\n f1.write('\\n')\n for j in range(len(indexorder)):\n gaorder=indexorder[j]\n f1.write(str(gallery_label[gaorder])) # 将图片的路径,写入文件\n f1.write(' ') # 添加一个空格\n f1.write(str(gallery_cam[gaorder])) # 添加一个空格\n f1.write(' , ') # 添加一个空格\n f1.write('\\n')\n f1.write(\", \".join('%s' %id for id in indexorder)) # index\n f1.write('\\n') # 添加一个空格\n f1.write(\", \".join('%s' %id for id in scoreorder)) # score\n f1.write('\\n') # 转行字符写入文件 换行\n f1.write('\\n')\n\n if CMC_tmp[0] == -1:\n continue\n CMC = CMC + CMC_tmp\n ap += 
ap_tmp\n # print(i, CMC_tmp[0])\n\nCMC = CMC.float()\nCMC = CMC / len(query_label) # average CMC\nprint('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f' % (CMC[0], CMC[4], CMC[9], ap / len(query_label)))\n\n# multiple-query\nCMC = torch.IntTensor(len(gallery_label)).zero_()\nap = 0.0\nif multi:\n for i in range(len(query_label)):\n mquery_index1 = np.argwhere(mquery_label == query_label[i])\n mquery_index2 = np.argwhere(mquery_cam == query_cam[i])\n mquery_index = np.intersect1d(mquery_index1, mquery_index2)\n mq = np.mean(mquery_feature[mquery_index, :], axis=0)\n ap_tmp, CMC_tmp = evaluate(mq, query_label[i], query_cam[i], gallery_feature, gallery_label, gallery_cam)\n if CMC_tmp[0] == -1:\n continue\n CMC = CMC + CMC_tmp\n ap += ap_tmp\n # print(i, CMC_tmp[0])\n CMC = CMC.float()\n CMC = CMC / len(query_label) # average CMC\n print('multi Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f' % (CMC[0], CMC[4], CMC[9], ap / len(query_label)))\n" ]
[ [ "numpy.dot", "numpy.in1d", "numpy.setdiff1d", "numpy.argwhere", "numpy.intersect1d", "numpy.append", "numpy.mean", "numpy.argsort" ], [ "numpy.dot", "numpy.in1d", "numpy.setdiff1d", "numpy.argwhere", "numpy.intersect1d", "numpy.append", "numpy.mean", "numpy.argsort" ] ]
spectrochempy/spectrochempy
[ "829b290f465e630078785e303dbab197cd78b815" ]
[ "spectrochempy/core/analysis/simplisma.py" ]
[ "# -*- coding: utf-8 -*-\n\n#\n# =============================================================================\n# Copyright (©) 2015-2022 LCS\n# Laboratoire Catalyse et Spectrochimie, Caen, France.\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT\n# See full LICENSE agreement in the root directory\n# =============================================================================\n\"\"\"\nThis module implement the SIMPLISMA class.\n\"\"\"\n\n__all__ = [\"SIMPLISMA\"]\n\n__dataset_methods__ = []\n\n# ----------------------------------------------------------------------------\n# imports\n# ----------------------------------------------------------------------------\nimport numpy as np\n\nimport warnings\nfrom traitlets import HasTraits, Instance, Unicode\n\nfrom spectrochempy.core.dataset.nddataset import NDDataset\nfrom spectrochempy.core.dataset.npy import dot\nfrom spectrochempy.core import info_, set_loglevel, INFO\n\n\n# ============================================================================\n# class SIMPLISMA\n# ============================================================================\n\n\nclass SIMPLISMA(HasTraits):\n \"\"\"\n SIMPLe to use Interactive Self-modeling Mixture Analysis.\n\n This class performs a SIMPLISMA analysis of a 2D |NDDataset|. The algorithm is adapted from Windig's paper,\n Chemometrics and Intelligent Laboratory Systems, 36, 1997, 3-16.\n\n TODO : adapt to 3DDataset ?\n \"\"\"\n\n _St = Instance(NDDataset)\n _C = Instance(NDDataset)\n _X = Instance(NDDataset)\n _Pt = Instance(NDDataset)\n _s = Instance(NDDataset)\n _logs = Unicode\n\n def __init__(self, dataset, **kwargs):\n \"\"\"\n Parameters\n ----------\n dataset : |NDDataset|\n A 2D dataset containing the data matrix (spectra in rows).\n interactive : bool, optional, default=False\n If True, the determination of purest variables is carried out interactively\n n_pc : int, optional, default=2 in non-interactive mode; 100 in interactive mode\n The maximum number of pure compounds. 
Used only for non interactive analysis\n (the default in interative mode (100) will never be reached in practice).\n tol : float, optional, default=0.1\n The convergence criterion on the percent of unexplained variance.\n noise : float or int, optional, default=5\n A correction factor (%) for low intensity variables (0 - no offset, 15 - large offset).\n verbose : bool, optional, default=True\n If True some information is given during the analysis.\n \"\"\"\n\n super().__init__()\n\n # ------------------------------------------------------------------------\n # Utility functions\n # ------------------------------------------------------------------------\n def figures_of_merit(X, maxPIndex, C, St, j):\n # return %explained variance and stdev of residuals when the jth compound is added\n C[:, j] = X[:, maxPIndex[j]]\n St[0 : j + 1, :] = np.linalg.lstsq(\n C.data[:, 0 : j + 1], X.data, rcond=None\n )[0]\n Xhat = dot(C[:, 0 : j + 1], St[0 : j + 1, :])\n res = Xhat - X\n stdev_res = np.std(res)\n rsquare = 1 - np.linalg.norm(res) ** 2 / np.linalg.norm(X) ** 2\n return rsquare, stdev_res\n\n def str_iter_summary(j, index, coord, rsquare, stdev_res, diff):\n # return formatted list of figure of merits at a given iteration\n\n string = \"{:4} {:5} {:8.1f} {:10.4f} {:10.4f} \".format(\n j + 1, index, coord, stdev_res, rsquare\n )\n return string\n\n def get_x_data(X):\n if X.x is not None and not X.x.is_empty: # TODO what about labels?\n return X.x.data\n else:\n return np.arange(X.shape[-1])\n\n # ------------------------------------------------------------------------\n # Check data\n # ------------------------------------------------------------------------\n\n X = dataset\n\n if len(X.shape) != 2:\n raise ValueError(\"For now, SIMPLISMA only handles 2D Datasets\")\n\n if np.min(X.data) < 0:\n warnings.warn(\"SIMPLISMA does not handle easily negative values.\")\n # TODO: check whether negative values should be set to zero or not.\n\n verbose = kwargs.get(\"verbose\", True)\n if verbose:\n set_loglevel(INFO)\n\n interactive = kwargs.get(\"interactive\", False)\n tol = kwargs.get(\"tol\", 0.1)\n noise = kwargs.get(\"noise\", 3)\n n_pc = kwargs.get(\"n_pc\", 2)\n if n_pc < 2 or not isinstance(n_pc, int):\n raise ValueError(\n \"Oh you did not just... 'MA' in simplisMA stands for Mixture Analysis. 
\"\n \"The number of pure compounds should be an integer larger than 2\"\n )\n if interactive:\n n_pc = 100\n\n # ------------------------------------------------------------------------\n # Core\n # ------------------------------------------------------------------------\n\n if not interactive:\n logs = \"*** Automatic SIMPL(I)SMA analysis *** \\n\"\n else:\n logs = \"*** Interative SIMPLISMA analysis *** \\n\"\n logs += \"dataset: {}\\n\".format(X.name)\n logs += \" noise: {:2} %\\n\".format(noise)\n if not interactive:\n logs += \" tol: {:2} %\\n\".format(tol)\n logs += \" n_pc: {:2}\\n\".format(n_pc)\n logs += \"\\n\"\n logs += \"#iter index_pc coord_pc Std(res) R^2 \\n\"\n logs += \"---------------------------------------------\"\n info_(logs)\n logs += \"\\n\"\n\n # Containers for returned objects and intermediate data\n # ---------------------------------------------------\n # purity 'spectra' (generally spectra if X is passed,\n # but could also be concentrations if X.T is passed)\n Pt = NDDataset.zeros((n_pc, X.shape[-1]))\n Pt.name = \"Purity spectra\"\n Pt.set_coordset(y=Pt.y, x=X.x)\n Pt.y.title = \"# pure compound\"\n\n # weight matrix\n w = NDDataset.zeros((n_pc, X.shape[-1]))\n w.set_coordset(y=Pt.y, x=X.x)\n\n # Stdev spectrum\n s = NDDataset.zeros((n_pc, X.shape[-1]))\n s.name = \"Standard deviation spectra\"\n s.set_coordset(y=Pt.y, x=X.x)\n\n # maximum purity indexes and coordinates\n maxPIndex = [0] * n_pc\n maxPCoordinate = [0] * n_pc\n\n # Concentration matrix\n C = NDDataset.zeros((X.shape[-2], n_pc))\n C.name = \"Relative Concentrations\"\n C.set_coordset(y=X.y, x=C.x)\n C.x.title = \"# pure compound\"\n\n # Pure component spectral profiles\n St = NDDataset.zeros((n_pc, X.shape[-1]))\n St.name = \"Pure compound spectra\"\n St.set_coordset(y=Pt.y, x=X.x)\n\n # Compute Statistics\n # ------------------\n sigma = np.std(X.data, axis=0)\n mu = np.mean(X.data, axis=0)\n alpha = (noise / 100) * np.max(mu.data)\n lamda = np.sqrt(mu ** 2 + sigma ** 2)\n p = sigma / (mu + alpha)\n\n # scale dataset\n Xscaled = X.data / np.sqrt(mu ** 2 + (sigma + alpha) ** 2)\n\n # COO dispersion matrix\n COO = (1 / X.shape[-2]) * np.dot(Xscaled.T, Xscaled)\n\n # Determine the purest variables\n j = 0\n finished = False\n while not finished:\n # compute first purest variable and weights\n if j == 0:\n w[j, :] = lamda ** 2 / (mu ** 2 + (sigma + alpha) ** 2)\n s[j, :] = sigma * w[j, :]\n Pt[j, :] = p * w[j, :]\n\n # get index and coordinate of pure variable\n maxPIndex[j] = np.argmax(Pt[j, :].data)\n maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]\n\n # compute figures of merit\n rsquare0, stdev_res0 = figures_of_merit(X, maxPIndex, C, St, j)\n\n # add summary to log\n llog = str_iter_summary(\n j, maxPIndex[j], maxPCoordinate[j], rsquare0, stdev_res0, \"\"\n )\n logs += llog + \"\\n\"\n\n if verbose or interactive:\n print(llog)\n\n if interactive:\n # should plot purity and stdev, does not work for the moment\n # TODO: fix the code below\n # fig1, (ax1, ax2) = plt.subplots(2,1)\n # Pt[j, :].plot(ax=ax1)\n # ax1.set_title('Purity spectrum #{}'.format(j+1))\n # ax1.axvline(maxPCoordinate[j], color='r')\n # s[j, :].plot(ax=ax2)\n # ax2.set_title('standard deviation spectrum #{}'.format(j+1))\n # ax2.axvline(maxPCoordinate[j], color='r')\n # plt.show()\n\n ans = \"\"\n while ans.lower() not in [\"a\", \"c\"]:\n ans = input(\" |--> (a) Accept, (c) Change: \")\n\n while ans.lower() != \"a\":\n new = input(\n \" |--> enter the new index (int) or variable value (float): \"\n )\n try:\n new = 
int(new)\n maxPIndex[j] = new\n maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]\n except ValueError:\n try:\n new = float(new)\n maxPIndex[j] = np.argmin(abs(get_x_data(X) - new))\n maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]\n except ValueError:\n print(\n \"Incorrect answer. Please enter a valid index or value\"\n )\n\n rsquare0, stdev_res0 = figures_of_merit(X, maxPIndex, C, St, j)\n\n llog = str_iter_summary(\n j, maxPIndex[j], maxPCoordinate[j], rsquare0, stdev_res0, \"\"\n )\n logs += \" |--> changed pure variable #1\"\n logs += llog + \"\\n\"\n info_(llog)\n\n ans = input(\" |--> (a) Accept, (c) Change: \")\n # ans was [a]ccept\n j += 1\n if not interactive:\n j += 1\n\n prev_stdev_res = stdev_res0\n\n else:\n # compute jth purest variable\n for i in range(X.shape[-1]):\n Mji = np.zeros((j + 1, j + 1))\n idx = [i] + maxPIndex[0:j]\n for line in range(j + 1):\n for col in range(j + 1):\n Mji[line, col] = COO[idx[line], idx[col]]\n w[j, i] = np.linalg.det(Mji)\n Pt[j:] = p * w[j, :]\n s[j, :] = sigma * w[j, :]\n\n # get index and coordinate of jth pure variable\n maxPIndex[j] = np.argmax(Pt[j, :].data)\n maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]\n\n # compute figures of merit\n rsquarej, stdev_resj = figures_of_merit(X, maxPIndex, C, St, j)\n diff = 100 * (stdev_resj - prev_stdev_res) / prev_stdev_res\n prev_stdev_res = stdev_resj\n\n # add summary to log\n llog = str_iter_summary(\n j, maxPIndex[j], maxPCoordinate[j], rsquarej, stdev_resj, diff\n )\n logs += llog + \"\\n\"\n\n if verbose or interactive:\n info_(llog)\n\n if (\n interactive\n ): # TODO: I suggest to use jupyter widgets for the interactivity!\n # should plot purity and stdev, does not work for the moment\n # TODO: fix the code below\n # ax1.clear()\n # ax1.set_title('Purity spectrum #{}'.format(j+1))\n # Pt[j, :].plot(ax=ax1)\n # for coord in maxPCoordinate[:-1]:\n # ax1.axvline(coord, color='g')\n # ax1.axvline(maxPCoordinate[j], color='r')\n # ax2.clear()\n # ax2.set_title('standard deviation spectrum #{}'.format(j+1))\n # s[j, :].plot(ax=ax2)\n # for coord in maxPCoordinate[:-1]:\n # ax2.axvline(coord, color='g')\n # ax2.axvline(maxPCoordinate[j], color='r')\n # plt.show()\n\n ans = \"\"\n while ans.lower() not in [\"a\", \"c\", \"r\", \"f\"]:\n ans = input(\n \" |--> (a) Accept and continue, (c) Change, (r) Reject, (f) Accept and finish: \"\n )\n\n while ans.lower() == \"c\":\n new = input(\n \" |--> enter the new index (int) or variable value (float): \"\n )\n try:\n new = int(new)\n maxPIndex[j] = new\n maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]\n except ValueError:\n try:\n new = float(new)\n maxPIndex[j] = np.argmin(abs(get_x_data(X) - new))\n maxPCoordinate[j] = get_x_data(X)[maxPIndex[j]]\n except ValueError:\n print(\n \" |--> Incorrect answer. 
Please enter a valid index or value\"\n )\n\n rsquarej, stdev_resj = figures_of_merit(X, maxPIndex, C, St, j)\n diff = 100 * (stdev_resj - prev_stdev_res) / prev_stdev_res\n prev_stdev_res + stdev_resj\n\n logs += f\" |--> changed pure variable #{j + 1}\\n\"\n llog = str_iter_summary(\n j,\n maxPIndex[j],\n maxPCoordinate[j],\n rsquarej,\n stdev_resj,\n \"diff\",\n )\n logs += llog + \"\\n\"\n info_(llog)\n\n info_(\n f\"purest variable #{j + 1} set at index = {maxPIndex[j]} ; x = {maxPCoordinate[j]}\"\n )\n ans = input(\n \" |--> (a) Accept and continue, (c) Change, (r) Reject, (f) Accept and stop: \"\n )\n\n if ans.lower() == \"r\":\n maxPCoordinate[j] = 0\n maxPIndex[j] = 0\n logs += f\" |--> rejected pure variable #{j + 1}\\n\"\n j = j - 1\n\n elif ans.lower() == \"a\":\n j = j + 1\n\n elif ans.lower() == \"f\":\n finished = True\n j = j + 1\n llog = f\"\\n**** Interrupted by user at compound # {j} \\n**** End of SIMPL(I)SMA analysis.\"\n logs += llog + \"\\n\"\n Pt = Pt[0:j, :]\n St = St[0:j, :]\n s = s[0:j, :]\n C = C[:, 0:j]\n # not interactive\n else:\n j = j + 1\n if (1 - rsquarej) < tol / 100:\n llog = (\n f\"\\n**** Unexplained variance lower than 'tol' ({tol}%) \\n\"\n \"**** End of SIMPL(I)SMA analysis.\"\n )\n logs += llog + \"\\n\"\n Pt = Pt[0:j, :]\n St = St[0:j, :]\n s = s[0:j, :]\n C = C[:, 0:j]\n\n info_(llog)\n finished = True\n if j == n_pc:\n if not interactive:\n llog = (\n f\"\\n**** Reached maximum number of pure compounds 'n_pc' ({n_pc}) \\n\"\n \"**** End of SIMPL(I)SMA analysis.\"\n )\n logs += llog + \"\\n\"\n info_(llog)\n finished = True\n\n Pt.description = \"Purity spectra from SIMPLISMA:\\n\" + logs\n C.description = \"Concentration/contribution matrix from SIMPLISMA:\\n\" + logs\n St.description = \"Pure compound spectra matrix from SIMPLISMA:\\n\" + logs\n s.description = \"Standard deviation spectra matrix from SIMPLISMA:\\n\" + logs\n\n self._logs = logs\n self._X = X\n self._Pt = Pt\n self._C = C\n self._St = St\n self._s = s\n\n @property\n def X(self):\n \"\"\"\n The original dataset.\n \"\"\"\n return self._X\n\n @property\n def St(self):\n \"\"\"\n Spectra of pure compounds.\n \"\"\"\n return self._St\n\n @property\n def C(self):\n \"\"\"\n Intensities ('concentrations') of pure compounds in spectra.\n \"\"\"\n return self._C\n\n @property\n def Pt(self):\n \"\"\"\n Purity spectra.\n \"\"\"\n return self._Pt\n\n @property\n def s(self):\n \"\"\"\n Standard deviation spectra.\n \"\"\"\n return self._s\n\n @property\n def logs(self):\n \"\"\"\n Logs ouptut.\n \"\"\"\n return self._logs\n\n def reconstruct(self):\n \"\"\"\n Transform data back to the original space.\n\n The following matrix operation is performed: :math:`X'_{hat} = C'.S'^t`\n\n Returns\n -------\n X_hat\n The reconstructed dataset based on the SIMPLISMA Analysis.\n \"\"\"\n\n # reconstruct from concentration and spectra profiles\n\n X_hat = dot(self.C, self.St)\n X_hat.description = \"Dataset reconstructed by SIMPLISMA\\n\" + self.logs\n X_hat.title = \"X_hat: \" + self.X.title\n return X_hat\n\n def plotmerit(self, **kwargs):\n \"\"\"\n Plots the input dataset, reconstructed dataset and residuals.\n\n Parameters\n ----------\n **kwargs : dict\n Plotting parameters.\n\n Returns\n -------\n ax\n subplot.\n \"\"\"\n\n colX, colXhat, colRes = kwargs.get(\"colors\", [\"blue\", \"green\", \"red\"])\n\n X_hat = self.reconstruct()\n\n res = self.X - X_hat\n\n ax = self.X.plot(label=\"$X$\")\n ax.plot(X_hat.data.T, color=colXhat, label=r\"$\\hat{X}\")\n ax.plot(res.data.T, color=colRes, 
label=\"Residual\")\n ax.set_title(\"SIMPLISMA plot: \" + self.X.name)\n\n return ax\n\n\n# ============================================================================\nif __name__ == \"__main__\":\n pass\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.min", "numpy.arange", "numpy.linalg.norm", "numpy.linalg.det", "numpy.max", "numpy.std", "numpy.linalg.lstsq", "numpy.mean", "numpy.argmax", "numpy.zeros" ] ]
pawni/sgld_online_approximation
[ "1edae8a669fdeef4e5501bcb07d6b809fc4cccd9" ]
[ "experiment.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport os\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport edward as ed\nfrom edward.models import Normal, Categorical, Multinomial, Empirical, PointMass\nfrom tensorflow.python.training import moving_averages\n\n# setup function to handle session configuration and seeding\ndef setup():\n tf.reset_default_graph()\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n\n tf.set_random_seed(42)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n sess = tf.InteractiveSession(config=config)\n return sess\n\n# function to return data readers - it assumes that the notMNIST dataset has\n# been downloaded from https://github.com/davidflanagan/notMNIST-to-MNIST\ndef get_data():\n mnist = input_data.read_data_sets('MNIST_data', one_hot=False)\n notmnist = input_data.read_data_sets('notMNIST_data', one_hot=False)\n return mnist, notmnist\n\n# function to build a NN using a variables dict. If the variables for a 3 layer\n# network is present it builds a 3 layer network. Otherwise it builds a 1 layer\n# network. If a keep_prob for dropout is given it includes dropout in the model.\ndef build_nn(variables, dropout=None):\n x_ = tf.reshape(variables['x'], [-1, 784])\n if 'W_3' in variables:\n if dropout:\n h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x_, variables['W_0']) + variables['b_0']), keep_prob=dropout)\n h2 = tf.nn.dropout(tf.nn.relu(tf.matmul(h1, variables['W_1']) + variables['b_1']), keep_prob=dropout)\n h3 = tf.nn.dropout(tf.nn.relu(tf.matmul(h2, variables['W_2']) + variables['b_2']), keep_prob=dropout)\n else:\n h1 = tf.nn.relu(tf.matmul(x_, variables['W_0']) + variables['b_0'])\n h2 = tf.nn.relu(tf.matmul(h1, variables['W_1']) + variables['b_1'])\n h3 = tf.nn.relu(tf.matmul(h2, variables['W_2']) + variables['b_2'])\n\n logits = tf.matmul(h3, variables['W_3']) + variables['b_3']\n else:\n if dropout:\n h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x_, variables['W_0']) + variables['b_0']), keep_prob=dropout)\n else:\n h1 = tf.nn.relu(tf.matmul(x_, variables['W_0']) + variables['b_0'])\n\n logits = tf.matmul(h1, variables['W_1']) + variables['b_1']\n return logits\n\n# Builds the 1 layer probabilistic model using edward random variables\n# returns the output and variables as dictionary\ndef get_model(dropout=None):\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y = tf.placeholder(tf.int32, shape=[None])\n\n W_0 = Normal(mu=tf.zeros([784, 50]), sigma=tf.ones([784, 50]))\n W_1 = Normal(mu=tf.zeros([50, 10]), sigma=tf.ones([50, 10]))\n b_0 = Normal(mu=tf.zeros(50), sigma=tf.ones(50))\n b_1 = Normal(mu=tf.zeros(10), sigma=tf.ones(10))\n \n variables = {'W_0': W_0, 'W_1': W_1,\n 'b_0': b_0, 'b_1': b_1,\n 'x': x, 'y': y}\n \n logits = build_nn(variables, dropout=dropout)\n y_ = Categorical(logits=logits)\n return y_, variables\n\n# Builds the 3 layer probabilistic model using edward random variables\n# returns the output and variables as dictionary\ndef get_model_3layer(dropout=None):\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y = tf.placeholder(tf.int32, shape=[None])\n\n W_0 = Normal(mu=tf.zeros([784, 200]), sigma=tf.ones([784, 200]))\n W_1 = Normal(mu=tf.zeros([200, 200]), sigma=tf.ones([200, 200]))\n W_2 = Normal(mu=tf.zeros([200, 200]), sigma=tf.ones([200, 200]))\n W_3 = Normal(mu=tf.zeros([200, 10]), sigma=tf.ones([200, 10]))\n b_0 = Normal(mu=tf.zeros(200), sigma=tf.ones(200))\n b_1 = Normal(mu=tf.zeros(200), sigma=tf.ones(200))\n b_2 = Normal(mu=tf.zeros(200), sigma=tf.ones(200))\n b_3 = 
Normal(mu=tf.zeros(10), sigma=tf.ones(10))\n \n variables = {'W_0': W_0, 'W_1': W_1, 'W_2': W_2, 'W_3': W_3,\n 'b_0': b_0, 'b_1': b_1, 'b_2': b_2, 'b_3': b_3,\n 'x': x, 'y': y}\n \n logits = build_nn(variables, dropout=dropout)\n y_ = Categorical(logits=logits)\n return y_, variables\n\n# Function to build an ensemble from the random variables and produce tensors\n# for calculating the mean classificationa accuracy of the model as well as the\n# per-datapoint-disagreement as defined in Lakshminarayanan et al. (2016), Simple and scalable\n# predictive uncertainty estimation using deep ensembles\ndef get_metrics(model_variables, approx_variables, num_samples=10, dropout=None):\n eps = 1e-8\n ensemble_model = tf.stack([build_nn(\n {key: approx_variables[key].sample()\n if key in approx_variables else model_variables[key]\n for key in model_variables}, dropout=dropout)\n for _ in range(num_samples)])\n ensemble_preds = tf.nn.softmax(ensemble_model)\n disagreement = tf.reduce_sum(tf.reduce_sum(ensemble_preds\n * tf.log(ensemble_preds\n / (tf.reduce_mean(ensemble_preds, axis=0)\n + eps)\n + eps),\n axis=-1),\n axis=0)\n accuracy = tf.reduce_mean(\n tf.cast(\n tf.equal(\n tf.cast(\n tf.argmax(tf.reduce_mean(ensemble_preds, axis=0), axis=-1),\n tf.int32),\n model_variables['y']),\n tf.float32))\n return accuracy, disagreement\n\n# Function to build an ensemble from the pretrained neural network states and produce tensors\n# for calculating the mean classificationa accuracy of the model as well as the\n# per-datapoint-disagreement as defined in Lakshminarayanan et al. (2016), Simple and scalable\n# predictive uncertainty estimation using deep ensembles\ndef get_metrics_ensemble(model_variables, approx_variables, num_samples=10, dropout=None):\n eps = 1e-8\n ensemble_model = tf.stack([build_nn(\n {key: approx_variables[i][key]\n if key in approx_variables[i] else model_variables[key]\n for key in model_variables})\n for i in np.random.permutation(len(approx_variables))[:num_samples]])\n ensemble_preds = tf.nn.softmax(ensemble_model)\n disagreement = tf.reduce_sum(tf.reduce_sum(ensemble_preds\n * tf.log(ensemble_preds\n / (tf.reduce_mean(ensemble_preds, axis=0)\n + eps)\n + eps),\n axis=-1),\n axis=0)\n accuracy = tf.reduce_mean(\n tf.cast(\n tf.equal(\n tf.cast(\n tf.argmax(tf.reduce_mean(ensemble_preds, axis=0), axis=-1),\n tf.int32),\n model_variables['y']),\n tf.float32))\n return accuracy, disagreement\n\n# function to run our proposed outlier detection based on disagreement thresholding.\n# returns the number of correctly / incorrectly classified samples\ndef get_outlier_stats(model_variables, disagreement, mnist, notmnist):\n batch = mnist.train.next_batch(100)\n train_disagreements = disagreement.eval({model_variables['x']: batch[0],\n model_variables['y']: batch[1]})\n threshold = train_disagreements.mean() + 3. 
* train_disagreements.std()\n mnist_disagreements = disagreement.eval({model_variables['x']: mnist.test.images,\n model_variables['y']: mnist.test.labels})\n notmnist_disagreements = disagreement.eval({model_variables['x']: notmnist.test.images,\n model_variables['y']: notmnist.test.labels})\n mnist_outlier = mnist_disagreements > threshold\n notmnist_outlier = notmnist_disagreements > threshold\n return {'TP': np.sum(notmnist_outlier),\n 'FN': np.sum(1 - notmnist_outlier),\n 'FP': np.sum(mnist_outlier),\n 'TN': np.sum(1 - mnist_outlier),\n }\n\n# function to return the variables for approximating the 1 layer model using variational inference\ndef get_vi_approximation_variables(): \n qW_0 = Normal(mu=tf.Variable(tf.random_normal([784, 50], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([784, 50], stddev=0.1))))\n qW_1 = Normal(mu=tf.Variable(tf.random_normal([50, 10], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([50, 10], stddev=0.1))))\n qb_0 = Normal(mu=tf.Variable(tf.random_normal([50], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([50], stddev=0.1))))\n qb_1 = Normal(mu=tf.Variable(tf.random_normal([10], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([10], stddev=0.1))))\n variables = {'W_0': qW_0, 'W_1': qW_1, 'b_0': qb_0, 'b_1': qb_1}\n return variables\n\n# function to return the variables for approximating the 3 layer model using variational inference\ndef get_vi_approximation_variables_3layer(): \n qW_0 = Normal(mu=tf.Variable(tf.random_normal([784, 200], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([784, 200], stddev=0.1))))\n qW_1 = Normal(mu=tf.Variable(tf.random_normal([200, 200], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([200, 200], stddev=0.1))))\n qW_2 = Normal(mu=tf.Variable(tf.random_normal([200, 200], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([200, 200], stddev=0.1))))\n qW_3 = Normal(mu=tf.Variable(tf.random_normal([200, 10], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([200, 10], stddev=0.1))))\n qb_0 = Normal(mu=tf.Variable(tf.random_normal([200], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([200], stddev=0.1))))\n qb_1 = Normal(mu=tf.Variable(tf.random_normal([200], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([200], stddev=0.1))))\n qb_2 = Normal(mu=tf.Variable(tf.random_normal([200], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([200], stddev=0.1))))\n qb_3 = Normal(mu=tf.Variable(tf.random_normal([10], stddev=0.1)),\n sigma=tf.nn.softplus(tf.Variable(tf.random_normal([10], stddev=0.1))))\n variables = {'W_0': qW_0, 'W_1': qW_1, 'W_2': qW_2, 'W_3': qW_3,\n 'b_0': qb_0, 'b_1': qb_1, 'b_2': qb_2, 'b_3': qb_3}\n return variables\n\n# function to return the variables for approximating the 1 layer model using our online approximation of sampling methods\ndef get_gauss_approximation_variables(): \n qW_0 = Normal(mu=tf.Variable(tf.zeros([784, 50])),\n sigma=tf.Variable(tf.zeros([784, 50])))\n qW_1 = Normal(mu=tf.Variable(tf.zeros([50, 10])),\n sigma=tf.Variable(tf.zeros([50, 10])))\n qb_0 = Normal(mu=tf.Variable(tf.zeros([50])),\n sigma=tf.Variable(tf.zeros([50])))\n qb_1 = Normal(mu=tf.Variable(tf.zeros([10])),\n sigma=tf.Variable(tf.zeros([10])))\n variables = {'W_0': qW_0, 'W_1': qW_1, 'b_0': qb_0, 'b_1': qb_1}\n return variables\n\n# function to return the variables for approximating the 3 layer model using our online approximation of 
sampling methods\ndef get_gauss_approximation_variables_3layer(): \n qW_0 = Normal(mu=tf.Variable(tf.zeros([784, 200])),\n sigma=tf.Variable(tf.zeros([784, 200])))\n qW_1 = Normal(mu=tf.Variable(tf.zeros([200, 200])),\n sigma=tf.Variable(tf.zeros([200, 200])))\n qW_2 = Normal(mu=tf.Variable(tf.zeros([200, 200])),\n sigma=tf.Variable(tf.zeros([200, 200])))\n qW_3 = Normal(mu=tf.Variable(tf.zeros([200, 10])),\n sigma=tf.Variable(tf.zeros([200, 10])))\n qb_0 = Normal(mu=tf.Variable(tf.zeros([200])),\n sigma=tf.Variable(tf.zeros([200])))\n qb_1 = Normal(mu=tf.Variable(tf.zeros([200])),\n sigma=tf.Variable(tf.zeros([200])))\n qb_2 = Normal(mu=tf.Variable(tf.zeros([200])),\n sigma=tf.Variable(tf.zeros([200])))\n qb_3 = Normal(mu=tf.Variable(tf.zeros([10])),\n sigma=tf.Variable(tf.zeros([10])))\n variables = {'W_0': qW_0, 'W_1': qW_1, 'W_2': qW_2, 'W_3': qW_3,\n 'b_0': qb_0, 'b_1': qb_1, 'b_2': qb_2, 'b_3': qb_3}\n return variables\n\n# function to return the variables for approximating the 1 layer model using MAP\ndef get_pointmass_approximation_variables(): \n qW_0 = PointMass(tf.Variable(tf.random_normal([784, 50], stddev=0.1)))\n qW_1 = PointMass(tf.Variable(tf.random_normal([50, 10], stddev=0.1)))\n qb_0 = PointMass(tf.Variable(tf.random_normal([50], stddev=0.1)))\n qb_1 = PointMass(tf.Variable(tf.random_normal([10], stddev=0.1)))\n variables = {'W_0': qW_0, 'W_1': qW_1, 'b_0': qb_0, 'b_1': qb_1}\n return variables\n\n# function to return the variables for approximating the 3 layer model using MAP\ndef get_pointmass_approximation_variables_3layer(): \n qW_0 = PointMass(tf.Variable(tf.random_normal([784, 200], stddev=0.1)))\n qW_1 = PointMass(tf.Variable(tf.random_normal([200, 200], stddev=0.1)))\n qW_2 = PointMass(tf.Variable(tf.random_normal([200, 200], stddev=0.1)))\n qW_3 = PointMass(tf.Variable(tf.random_normal([200, 10], stddev=0.1)))\n qb_0 = PointMass(tf.Variable(tf.random_normal([200], stddev=0.1)))\n qb_1 = PointMass(tf.Variable(tf.random_normal([200], stddev=0.1)))\n qb_2 = PointMass(tf.Variable(tf.random_normal([200], stddev=0.1)))\n qb_3 = PointMass(tf.Variable(tf.random_normal([10], stddev=0.1)))\n variables = {'W_0': qW_0, 'W_1': qW_1, 'W_2': qW_2, 'W_3': qW_3,\n 'b_0': qb_0, 'b_1': qb_1, 'b_2': qb_2, 'b_3': qb_3}\n return variables\n\n" ]
[ [ "tensorflow.matmul", "tensorflow.nn.softmax", "tensorflow.InteractiveSession", "tensorflow.zeros", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.placeholder", "tensorflow.ones", "tensorflow.ConfigProto", "tensorflow.reset_default_graph", "tensorflow.set_random_seed", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "numpy.sum", "tensorflow.random_normal" ] ]
LifeEGX/methQC
[ "2b4f960e7e5c7baca9dc778ca05ee332e2f27653" ]
[ "methylcheck/qc_plot.py" ]
[ "import warnings\nfrom pathlib import Path\nimport logging\nimport pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib.pyplot as plt\n\n#app\nimport methylcheck\nfrom .progress_bar import *\n\nLOGGER = logging.getLogger(__name__)\n\n__all__ = ['run_qc', 'plot_beta_by_type', 'qc_signal_intensity', 'plot_M_vs_U', 'plot_controls', 'bis_conversion_control']\n\ndef run_qc(path):\n \"\"\"Generates all QC plots for a dataset in the path provided.\n if `process --all` was used to create control probes and raw values for QC,\n because it uses four output files:\n\n - beta_values.pkl\n - control_probes.pkl\n - meth_values.pkl or noob_meth_values.pkl\n - unmeth_values.pkl or noob_unmeth_values.pkl\n\n output is all to screen, so best to use in a jupyter notebook.\n If you prefer output in a PDF, use ReportPDF instead.\n\n Note: this will only look in the path folder; it doesn't do a recursive search for matching files.\n \"\"\"\n try:\n beta_df = pd.read_pickle(Path(path,'beta_values.pkl').expanduser())\n controls = pd.read_pickle(Path(path,'control_probes.pkl').expanduser())\n if Path(path,'meth_values.pkl').expanduser().exists() and Path(path,'unmeth_values.pkl').expanduser().exists():\n meth_df = pd.read_pickle(Path(path,'meth_values.pkl').expanduser())\n unmeth_df = pd.read_pickle(Path(path,'unmeth_values.pkl').expanduser())\n else:\n meth_df = pd.read_pickle(Path(path,'noob_meth_values.pkl').expanduser())\n unmeth_df = pd.read_pickle(Path(path,'noob_unmeth_values.pkl').expanduser())\n if Path(path,'poobah_values.pkl').expanduser().exists():\n poobah = pd.read_pickle(Path(path,'poobah_values.pkl').expanduser())\n else:\n poobah = None\n except FileNotFoundError:\n if not Path(path).exists():\n raise FileNotFoundError(\"Invalid path\")\n elif not Path(path).is_dir():\n raise FileNotFoundError(\"Path is not a directory.\")\n raise FileNotFoundError(\"Files missing. run_qc() only works if you used `methylprep process --all` option to produce beta_values, control_probes, meth_values, and unmeth_values files.\")\n # needs meth_df, unmeth_df, controls, and beta_df\n # if passing in a path, it will auto-search for poobah. 
but if meth/unmeth passed in, you must explicitly tell it to look.\n plot_M_vs_U(meth=meth_df, unmeth=unmeth_df, poobah=poobah)\n qc_signal_intensity(meth=meth_df, unmeth=unmeth_df, poobah=poobah)\n plot_controls(controls, 'all')\n plot_beta_by_type(beta_df, 'all')\n\n\ndef qc_signal_intensity(data_containers=None, path=None, meth=None, unmeth=None, poobah=None, palette=None,\n noob=True, silent=False, verbose=False, plot=True, cutoff_line=True, bad_sample_cutoff=11.5, return_fig=False):\n \"\"\"Suggests sample outliers based on methylated and unmethylated signal intensity.\n\ninput (one of these):\n=====================\n path\n to csv files processed using methylprep\n these have \"noob_meth\" and \"noob_unmeth\" columns per sample file this function can use.\n if you want it to processed data uncorrected data.\n\n data_containers\n output from the methylprep.run_pipeline() command when run in a script or notebook.\n you can also recreate the list of datacontainers using methylcheck.load(<filepath>,'meth')\n\n (meth and unmeth)\n if you chose `process --all` you can load the raw intensities like this, and pass them in:\n meth = pd.read_pickle('meth_values.pkl')\n unmeth = pd.read_pickle('unmeth_values.pkl')\n THIS will run the fastest.\n (meth and unmeth and poobah)\n if poobah=None (default): Does nothing\n if poobah=False: suppresses this color\n if poobah=dataframe: color-codes samples according to percent probe failure range,\n but only if you pass in meth and unmeth dataframes too, not data_containers object.\n if poobah=True: looks for poobah_values.pkl in the path provided.\n\noptional params:\n================\n cutoff_line: True will draw the line; False omits it.\n bad_sample_cutoff (default 11.5): set the cutoff for determining good vs bad samples, based on signal intensities of meth and unmeth fluorescence channels. 10.5 was borrowed from minfi's internal defaults.\n noob: use noob-corrected meth/unmeth values\n verbose: additional messages\n plot: if True (default), shows a plot. 
if False, this function returns the median values per sample of meth and unmeth probes.\n return_fig (False default), if True, and plot is True, returns a figure object instead of showing plot.\n compare: if the processed data contains both noob and uncorrected values, it will plot both in different colors\n palette: if using poobah to color code, you can specify a Seaborn palette to use.\n\nthis will draw a diagonal line on plots\n\nreturns:\n========\n A dictionary of data about good/bad samples based on signal intensity\n\nTODO:\n doesn't return both types of data if using compare and not plotting\n doesn't give good error message for compare\n \"\"\"\n if not path and not data_containers and type(meth) is type(None) and type(unmeth) is type(None):\n print(\"ERROR: You must specify a path to methylprep processed data files or provide a data_containers object as input.\")\n return\n if not isinstance(data_containers,list) and isinstance(data_containers, (str,Path)):\n print(\"ERROR: If you want to supply a path to your processed files, use 'path=<path>'.\")\n return\n # path can be a string, but must be converted to a Path\n if isinstance(path, str):\n path = Path(path)\n # meth can be none, or df, or path\n if isinstance(meth, type(None)) and isinstance(unmeth, type(None)):\n meth, unmeth = _get_data(data_containers=data_containers, path=path, compare=False, noob=noob, verbose=verbose)\n if (path is not None and not isinstance(poobah, pd.DataFrame)\n and not isinstance(poobah, type(None))):\n if poobah in (False,None):\n pass # unless poobah IS a dataframe below, nothing happens. None/False suppress this\n else:\n if 'poobah_values.pkl' in [i.name for i in list(path.rglob('poobah_values.pkl'))]:\n poobah = pd.read_pickle(list(path.rglob('poobah_values.pkl'))[0])\n else:\n if verbose and not silent:\n LOGGER.info(\"Cannot load poobah_values.pkl file.\")\n\n # Plotting\n medians = _make_qc_df(meth,unmeth)\n cutoffs = (medians.mMed.values + medians.uMed.values)/2\n bad_samples = medians.index[cutoffs < bad_sample_cutoff]\n\n # flex the x and y axes depending on the data\n min_x = int(min(medians.mMed))\n max_x = max(medians.mMed) + 1\n min_y = int(min(medians.uMed))\n max_y = max(medians.uMed) + 1\n\n if not plot:\n return {\n 'medians': medians,\n 'cutoffs': cutoffs,\n 'good_samples': [str(s) for s in medians.index[cutoffs >= bad_sample_cutoff]],\n 'bad_samples': [str(s) for s in bad_samples],\n 'bad_sample_cutoff': bad_sample_cutoff,\n }\n # set up figure\n fig,ax = plt.subplots(figsize=(10,10))\n plt.grid(color=(0.8, 0.8, 0.8), linestyle='dotted')\n plt.xlabel('Meth Median Intensity (log2)', fontsize='large')\n plt.ylabel('Unmeth Median Intensity (log2)', fontsize='large')\n if not isinstance(poobah, pd.DataFrame):\n plt.title('Log M versus U plot')\n # bad values\n plt.scatter(x='mMed',y='uMed',data=medians[medians.index.isin(bad_samples)],label='Bad Samples',c='red')\n # good values\n plt.scatter(x='mMed',y='uMed',data=medians[~medians.index.isin(bad_samples)],label=\"Good Samples\",c='black')\n elif isinstance(poobah, pd.DataFrame):\n plt.title('Log M versus U plot: Colors are the percent of probe failures per sample')\n if poobah.isna().sum().sum() > 0:\n if poobah.isna().equals(meth.isna()) and poobah.isna().equals(unmeth.isna()):\n pass # not a problem if the SAME probes are excluded in all dataframes\n else:\n LOGGER.warning(\"Your poobah_values.pkl file contains missing values; color coding will be inaccurate.\")\n percent_failures = round(100*( poobah[poobah > 0.05].count() / 
poobah.count() ),1)\n percent_failures = percent_failures.rename('probe_failure_(%)')\n # Series.where will replace the stuff that is False, so you have to negate it.\n percent_failures_hues = percent_failures.where(~percent_failures.between(0,5), 0)\n percent_failures_hues.where(~percent_failures_hues.between(5,10), 1, inplace=True)\n percent_failures_hues.where(~percent_failures_hues.between(10,15), 2, inplace=True)\n percent_failures_hues.where(~percent_failures_hues.between(15,20), 3, inplace=True)\n percent_failures_hues.where(~percent_failures_hues.between(20,25), 4, inplace=True)\n percent_failures_hues.where(~percent_failures_hues.between(25,30), 5, inplace=True)\n percent_failures_hues.where(~(percent_failures_hues > 30), 6, inplace=True)\n percent_failures_hues = percent_failures_hues.astype(int)\n #sizes = percent_failures_hues.copy()\n percent_failures_hues = percent_failures_hues.replace({0:'0 to 5', 1:'5 to 10', 2:'10 to 15', 3:'15 to 20', 4:'20 to 25', 5:'25 to 30', 6:'>30'})\n legend_order = ['0 to 5','5 to 10','10 to 15','15 to 20','20 to 25','25 to 30','>30']\n try:\n qc = pd.merge(left=medians,\n right=percent_failures_hues,\n left_on=medians.index,\n right_on=percent_failures_hues.index,\n how='inner')\n except:\n # edge case where meth/unmeth medians loses sample sentrix_ids, but poobah pkl retains them - proceed with merging assuming order is retained\n tempA = medians.reset_index(drop=True)\n tempB = percent_failures_hues.reset_index(drop=True)\n #qc = pd.merge(left=tempA,right=tempB,left_on=tempA.index,right_on=tempB.index,how='inner')\n qc = pd.concat([tempA, tempB], axis='columns') # pandas 1.3x needs this. Above .merge fails when inner-joining on range-indeces.\n hues_palette = sb.color_palette(\"twilight\", n_colors=7, desat=0.8) if palette is None else sb.color_palette(palette, n_colors=7, desat=0.8)\n this = sb.scatterplot(data=qc, x=\"mMed\", y=\"uMed\", hue=\"probe_failure_(%)\",\n palette=hues_palette, hue_order=legend_order, legend=\"full\") # size=\"size\"\n else:\n raise NotImplementedError(\"poobah color coding is not implemented with 'compare' option\")\n\n plt.xlim([min_x,max_x])\n plt.ylim([min_y,max_y])\n if cutoff_line:\n x = np.linspace(6,14)\n y = -1*x+(2*bad_sample_cutoff)\n plt.plot(x, y, '--', lw=1, color='lightgrey', alpha=0.75, label='Cutoff')\n # legend\n legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')\n legend.set_title(\"Probe failure rate (%)\", prop={'size':'large'})\n # display plot\n if return_fig:\n return fig\n plt.show()\n plt.close('all')\n # print list of bad samples for user\n if len(bad_samples) > 0:\n print('List of Bad Samples')\n print([str(s) for s in bad_samples])\n return {\n 'medians': medians,\n 'cutoffs': cutoffs,\n 'good_samples': [str(s) for s in medians.index[cutoffs >= bad_sample_cutoff]],\n 'bad_samples': [str(s) for s in bad_samples],\n 'bad_sample_cutoff': bad_sample_cutoff,\n }\n\n\ndef _make_qc_df(meth,unmeth):\n \"\"\"Function takes meth and unmeth dataframes,\n returns a single dataframe with log2 medians for\n m and u values\"\"\"\n mmed = pd.DataFrame(np.log2(meth.median(axis=0)),columns=['mMed'])\n umed = pd.DataFrame(np.log2(unmeth.median(axis=0)),columns=['uMed'])\n qc = pd.merge(left=mmed,\n right=umed,\n left_on=mmed.index,\n right_on=umed.index,\n how='inner').set_index('key_0',drop=True)\n #del qc.index.name\n qc.index.name = None\n return qc\n\ndef _get_data(data_containers=None, path=None, compare=False, noob=True, verbose=True):\n \"\"\" internal function 
that loads data from object or path and returns 2 or 4 dataframes \"\"\"\n # NOTE: not a flexible function because it returns 0, 2, or 4 objects depending on inputs.\n # NOTE: this requires that data_containers label the index 'IlmnID' for each sample\n if data_containers:\n # Pull M and U values\n meth = pd.DataFrame(index=data_containers[0]._SampleDataContainer__data_frame.index)\n unmeth = pd.DataFrame(index=data_containers[0]._SampleDataContainer__data_frame.index)\n\n for i,c in enumerate(data_containers):\n sample = data_containers[i].sample\n m = c._SampleDataContainer__data_frame.rename(columns={'meth':sample})\n u = c._SampleDataContainer__data_frame.rename(columns={'unmeth':sample})\n meth = pd.merge(left=meth,right=m[sample],left_on='IlmnID',right_on='IlmnID',)\n unmeth = pd.merge(left=unmeth,right=u[sample],left_on='IlmnID',right_on='IlmnID')\n elif path:\n n = 'noob_' if noob else ''\n # first try to load from disk\n if (noob and Path(path, f'{n}meth_values.pkl').exists() and\n Path(path, f'{n}unmeth_values.pkl').exists()):\n _meth = pd.read_pickle(Path(path, f'{n}meth_values.pkl'))\n _unmeth = pd.read_pickle(Path(path, f'{n}unmeth_values.pkl'))\n return _meth, _unmeth\n # THIS DOES NOT warn user if they want noob and the files don't exist.\n elif Path(path, 'meth_values.pkl').exists() and Path(path,'unmeth_values.pkl').exists() and not compare:\n _meth = pd.read_pickle(Path(path, 'meth_values.pkl'))\n _unmeth = pd.read_pickle(Path(path, 'unmeth_values.pkl'))\n return _meth, _unmeth\n elif (compare and\n Path(path, 'meth_values.pkl').exists() and\n Path(path, 'unmeth_values.pkl').exists() and\n Path(path, f'{n}meth_values.pkl').exists() and\n Path(path, f'{n}unmeth_values.pkl').exists()):\n meth = pd.read_pickle(Path(path, 'meth_values.pkl'))\n unmeth = pd.read_pickle(Path(path, 'unmeth_values.pkl'))\n _meth = pd.read_pickle(Path(path, f'{n}meth_values.pkl'))\n _unmeth = pd.read_pickle(Path(path, f'{n}unmeth_values.pkl'))\n return meth, unmeth, _meth, _unmeth\n else:\n sample_filenames = []\n csvs = []\n files_found = False\n for file in tqdm(Path(path).expanduser().rglob('*_processed.csv'), desc='Loading files', total=len(list(Path(path).expanduser().rglob('*_processed.csv')))):\n this = pd.read_csv(file)\n files_found = True\n if f'{n}meth' in this.columns and f'{n}unmeth' in this.columns:\n csvs.append(this)\n sample_filenames.append(str(file.stem).replace('_processed',''))\n # note, this doesn't give a clear error message if using compare and missing uncorrected data.\n if verbose and len(csvs) > 0:\n print(f\"{len(csvs)} processed samples found.\")\n\n if csvs != []:\n meth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n}meth']})\n unmeth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n}unmeth']})\n meth.set_index('IlmnID', inplace=True)\n unmeth.set_index('IlmnID', inplace=True)\n if compare:\n n2 = '' if noob else 'noob_'\n _meth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n2}meth']})\n _unmeth = pd.DataFrame({'IlmnID': csvs[0]['IlmnID'], 0: csvs[0][f'{n2}unmeth']})\n _meth.set_index('IlmnID', inplace=True)\n _unmeth.set_index('IlmnID', inplace=True)\n for idx, sample in tqdm(enumerate(csvs[1:],1), desc='Samples', total=len(csvs)):\n # columns are meth, unmeth OR noob_meth, noob_unmeth, AND IlmnID\n meth = pd.merge(left=meth, right=sample[f'{n}meth'], left_on='IlmnID', right_on=sample['IlmnID'])\n meth = meth.rename(columns={f'{n}meth': sample_filenames[idx]})\n unmeth = pd.merge(left=unmeth, right=sample[f'{n}unmeth'], 
left_on='IlmnID', right_on=sample['IlmnID'])\n unmeth = unmeth.rename(columns={f'{n}unmeth': sample_filenames[idx]})\n if compare:\n _meth = pd.merge(left=_meth, right=sample[f'{n2}meth'], left_on='IlmnID', right_on=sample['IlmnID'])\n _meth = _meth.rename(columns={f'{n2}meth': sample_filenames[idx]})\n _unmeth = pd.merge(left=_unmeth, right=sample[f'{n2}unmeth'], left_on='IlmnID', right_on=sample['IlmnID'])\n _unmeth = _unmeth.rename(columns={f'{n2}unmeth': sample_filenames[idx]})\n else:\n if verbose:\n print(f\"{len(csvs)} processed samples found in {path} using NOOB: {noob}.\")\n if files_found:\n data_columns = \"NOOB meth/unmeth\" if noob else \"non-NOOB-corrected meth/unmeth\"\n print(f\"processed files found, but did not contain the right data ({data_columns})\")\n return\n if compare:\n return meth, unmeth, _meth, _unmeth\n return meth, unmeth\n\n\ndef plot_M_vs_U(data_containers_or_path=None, meth=None, unmeth=None, poobah=None,\n noob=True, silent=False, verbose=False, plot=True, compare=False, return_fig=False, palette=None,\n cutoff_line=True):\n \"\"\"plot methylated vs unmethylated probe intensities\n\ninput (choose one of these):\n============================\n PATH to csv files processed using methylprep\n these have \"noob_meth\" and \"noob_unmeth\" columns per sample file this function can use.\n if you want it to processed data uncorrected data.\n (If there is a poobah_values.pkl file in this PATH, it will use the file to color code points)\n\n data_containers = run_pipeline(data_dir = 'somepath',\n save_uncorrected=True,\n sample_sheet_filepath='samplesheet.csv')\n you can also recreate the list of datacontainers using methylcheck.load(<filepath>,'meth')\n\n\n (meth and unmeth)\n if you chose `process --all` you can load the raw intensities like this, and pass them in:\n meth = pd.read_pickle('meth_values.pkl')\n unmeth = pd.read_pickle('unmeth_values.pkl')\n THIS will run the fastest.\n\n poobah\n filepath: You may supply the file path to the p-value detection dataframe. If supplied, it will color\n code points on the plot.\n False: set poobah to False to suppress this coloring.\n None (default): if there is a poobah_values.pkl file in your path, it will use it.\n\noptional params:\n noob: use noob-corrected meth/unmeth values\n verbose: additional messages\n plot: if True (default), shows a plot. 
if False, this function returns the median values per sample of meth and unmeth probes.\n return_fig: (False default), if True (and plot is true), returns the figure object instead of showing it.\n compare:\n if the processed data contains both noob and uncorrected values, it will plot both in different colors\n the compare option will not work with using the 'meth' and 'unmeth' inputs, only with path or data_containers.\n cutoff_line: True will draw a diagonal line on plots.\n the cutoff line is based on the X-Y scale of the plot, which depends on the range of intensity values in your data set.\n\nTODO:\n doesn't return both types of data if using compare and not plotting\n doesn't give good error message for compare\n \"\"\"\n try:\n if Path(data_containers_or_path).exists(): # if passing in a valid string, this should work.\n path = Path(data_containers_or_path)\n else:\n path = None\n except TypeError:\n path = None # fails if passing in a data_containers object\n\n if isinstance(data_containers_or_path, Path): #this only recognizes a Path object, not a string path\n path = data_containers_or_path\n data_containers = None\n elif isinstance(path, Path):\n data_containers = None\n else:\n path = None\n data_containers = data_containers_or_path # by process of exclusion, this must be an object, or None\n\n if isinstance(data_containers_or_path, pd.DataFrame):\n raise ValueError(\"M_vs_U cannot plot a dataframe of processed data; requires meth and unmeth values.\")\n if not isinstance(path, Path) and isinstance(data_containers, type(None)) and not isinstance(meth, pd.DataFrame) and not isinstance(unmeth, pd.DataFrame):\n print(\"You must specify a path to methylprep processed data files, or provide a data_containers object as input, or pass in meth and unmeth dataframes.\")\n # hasattr: user defined class instances should have __name__ and other objects should not\n return\n\n # 2. load meth + unmeth from path\n elif isinstance(meth,type(None)) and isinstance(unmeth,type(None)):\n try:\n if compare:\n meth, unmeth, _meth, _unmeth = _get_data(data_containers, path, compare=compare, noob=noob)\n else:\n meth, unmeth = _get_data(data_containers, path, compare=compare, noob=noob)\n except Exception as e:\n print(e)\n print(\"No processed data found.\")\n return\n\n # 2. 
load poobah_df if exists\n if isinstance(poobah,bool) and poobah == False:\n poobah_df = None\n elif isinstance(poobah, pd.DataFrame):\n poobah_df = poobah\n poobah = True\n else:\n poobah_df = None\n if isinstance(path, Path) and 'poobah_values.pkl' in [i.name for i in list(path.rglob('poobah_values.pkl'))]:\n poobah_df = pd.read_pickle(list(path.rglob('poobah_values.pkl'))[0])\n poobah=True\n else:\n if poobah_df is None: # didn't find a poobah file to load\n LOGGER.warning(\"Did not find a poobah_values.pkl file; unable to color-code plot.\")\n poobah = False #user may have set this to True or None, but changing params to fit data.\n if verbose and not silent and isinstance(poobah_df,pd.DataFrame):\n LOGGER.info(\"Using poobah_values.pkl\")\n\n #palette options to pass in: \"CMRmap\" \"flare\" \"twilight\" \"Blues\", \"tab10\"\n hues_palette = sb.color_palette(\"twilight\", n_colors=7, desat=0.8) if palette is None else sb.color_palette(palette, n_colors=7, desat=0.8)\n\n if poobah is not False and isinstance(poobah_df, pd.DataFrame) and not compare:\n if poobah_df.isna().sum().sum() > 0:\n if poobah_df.isna().equals(meth.isna()) and poobah_df.isna().equals(unmeth.isna()):\n pass # not a problem if the SAME probes are excluded in all dataframes\n else:\n LOGGER.warning(\"Your poobah_values.pkl file contains missing values; color coding will be inaccurate.\")\n percent_failures = round(100*( poobah_df[poobah_df > 0.05].count() / poobah_df.count() ),1)\n percent_failures = percent_failures.rename('probe_failure (%)')\n meth_med = meth.median()\n unmeth_med = unmeth.median()\n # Series.where will replace the stuff that is False, so you have to negate it.\n percent_failures_hues = percent_failures.where(~percent_failures.between(0,5), 0)\n percent_failures_hues.where(~percent_failures_hues.between(5,10), 1, inplace=True)\n percent_failures_hues.where(~percent_failures_hues.between(10,15), 2, inplace=True)\n percent_failures_hues.where(~percent_failures_hues.between(15,20), 3, inplace=True)\n percent_failures_hues.where(~percent_failures_hues.between(20,25), 4, inplace=True)\n percent_failures_hues.where(~percent_failures_hues.between(25,30), 5, inplace=True)\n percent_failures_hues.where(~(percent_failures_hues > 30), 6, inplace=True)\n percent_failures_hues = percent_failures_hues.astype(int)\n #sizes = percent_failures_hues.copy()\n percent_failures_hues = percent_failures_hues.replace({0:'0 to 5', 1:'5 to 10', 2:'10 to 15', 3:'15 to 20', 4:'20 to 25', 5:'25 to 30', 6:'>30'})\n legend_order = ['0 to 5','5 to 10','10 to 15','15 to 20','20 to 25','25 to 30','>30']\n df = pd.concat([\n meth_med.rename('meth'),\n unmeth_med.rename('unmeth'),\n percent_failures_hues],\n #sizes.rename('size')],\n axis=1)\n\n if plot:\n # plot it\n fig,ax = plt.subplots(figsize=(10,10))\n plt.grid(color=(0.8, 0.8, 0.8), linestyle='dotted')\n if poobah and not compare:\n this = sb.scatterplot(data=df, x=\"meth\", y=\"unmeth\", hue=\"probe_failure (%)\",\n palette=hues_palette, hue_order=legend_order, legend=\"full\") # size=\"size\"\n legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')\n legend.set_title(\"Probe failure rate (%)\", prop={'size':'large'})\n elif not poobah and not compare:\n this = sb.scatterplot(x=meth.median(),y=unmeth.median(),s=75)\n elif compare:\n data_df = pd.DataFrame(data={\n 'meth': meth.median(),\n 'unmeth': unmeth.median()\n })\n data_df[\"hue\"] = \"Raw intensity\"\n data_df2 = pd.DataFrame(data={ # the NOOB version\n 'meth': _meth.median(),\n 
'unmeth': _unmeth.median()\n })\n # each data set should have same samples in same order, so label_lookup will work for both hues\n label_lookup = {index_val: chr(i+65) if i <= 26 else str(i-26) for i,index_val in enumerate(data_df.index)}\n data_df2['hue'] = \"Corrected intensity\"\n data_df = data_df.append(data_df2)\n del data_df2\n legend_order = [\"Raw intensity\", \"Corrected intensity\"]\n hues_palette = sb.color_palette(\"tab10\", n_colors=2) if palette is None else sb.color_palette(palette, n_colors=2)\n this = sb.scatterplot(data=data_df, x='meth', y='unmeth', hue='hue', palette=hues_palette)\n # FINALLY, label ALL points so you can compare the shifts\n for index_val, row in data_df.iterrows():\n color_code = {\"Raw intensity\":\"blue\", \"Corrected intensity\": \"darkorange\"}\n #proxy_label = chr(i+65) if i <= 52 else str(i-65)\n proxy_label = label_lookup.get(index_val,\"-1\")\n plt.text(x=row[\"meth\"]+7, y=row[\"unmeth\"]+7, s=proxy_label,\n fontdict={'color':color_code.get(row[\"hue\"], \"black\"), 'size':8, 'family':'sans-serif'})\n #bbox=dict(facecolor=’yellow’,alpha=0.5))\n if poobah and not compare:\n plt.title('M versus U plot: Colors are the percent of probe failures per sample')\n elif compare:\n plt.title('M versus U plot: Showing effect of processing fluorescence intensities')\n else:\n plt.title('M versus U plot')\n plt.xlabel('Median Methylated Intensity', fontsize='large')\n plt.ylabel('Median Unmethylated Intensity', fontsize='large')\n\n # add diagonal line\n if cutoff_line:\n line = {'y': this.axes.get_ylim(), 'x': this.axes.get_xlim()}\n sx = []\n sy = []\n for i in range(1000):\n sx.append(line['x'][0] + i/1000*(line['x'][1] - line['x'][0]))\n sy.append(line['y'][0] + i/1000*(line['y'][1] - line['y'][0]))\n this = sb.scatterplot(x=sx, y=sy, s=3, color=(0.8, 0.8, 0.8))\n if poobah:\n # This is necessary because legend title disappears when adding cutoff-line for some reason.\n legend = plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1, fontsize='large')\n legend.set_title(\"Probe failure rate (%)\", prop={'size':'large'})\n if return_fig:\n return this.get_figure()\n plt.show()\n plt.close('all')\n else:\n return {'meth_median': meth.median(), 'unmeth_median': unmeth.median()}\n\n\ndef plot_beta_by_type(beta_df, probe_type='all', return_fig=False, silent=False, on_lambda=False):\n \"\"\"compare betas for type I and II probes -- (inspired by the plotBetasByType() function)\n\nPlot the overall density distribution of beta values and the density distributions of the Infinium I or II probe types\n1 distribution plot; user defines type (I or II infinium)\n\n Doesn't work with 27k arrays because they are all of the same type, Infinium Type I.\n\noptions:\n return_fig: (default False) if True, returns a list of figure objects instead of showing plots.\n \"\"\"\n mouse_probe_types = ['cg','ch','uk']\n probe_types = ['I', 'II', 'IR', 'IG', 'all'] # 'SnpI', 'Control' are in manifest, but not in the processed data\n if probe_type not in probe_types + mouse_probe_types:\n raise ValueError(f\"Please specify an Infinium probe_type: ({probe_types}) to plot or, if mouse array, one of these ({mouse_probe_types}) or 'all'.\")\n\n # orient\n if beta_df.shape[1] > beta_df.shape[0]:\n beta_df = beta_df.transpose() # probes should be in rows.\n array_type, man_filepath = methylcheck.detect_array(beta_df, returns='filepath', on_lambda=on_lambda)\n # note that 'array_type' can look like string 'mouse' but only str(array_type) will match the string 'mouse'\n\n if 
Path.exists(man_filepath):\n try:\n from methylprep import Manifest, ArrayType\n except ImportError:\n raise ImportError(\"plot_betas_by_type() requires methylprep\")\n\n LOGGER.setLevel(logging.WARNING)\n manifest = Manifest(ArrayType(array_type), man_filepath, on_lambda=on_lambda)\n LOGGER.setLevel(logging.INFO)\n else:\n raise FileNotFoundError(\"manifest file not found.\")\n\n # merge reference col, filter probes, them remove ref col(s)\n orig_shape = beta_df.shape\n # II, I, IR, IG, Control\n mapper = manifest.data_frame.loc[:, ['probe_type','Color_Channel']]\n beta_df = beta_df.merge(mapper, right_index=True, left_index=True)\n\n figs = []\n if probe_type in ('I', 'all'):\n subset = beta_df[beta_df['probe_type'] == 'I']\n subset = subset.drop('probe_type', axis='columns')\n subset = subset.drop('Color_Channel', axis='columns')\n if return_fig:\n figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I probes', return_fig=True, silent=silent, full_range=True) )\n else:\n print(f'Found {subset.shape[0]} type I probes.')\n methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I probes', silent=silent, full_range=True)\n if probe_type in ('II', 'all'):\n subset = beta_df[beta_df['probe_type'] == 'II']\n subset = subset.drop('probe_type', axis='columns')\n subset = subset.drop('Color_Channel', axis='columns')\n if return_fig:\n figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type II probes', return_fig=True, silent=silent, full_range=True) )\n else:\n print(f'Found {subset.shape[0]} type II probes.')\n methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type II probes', silent=silent, full_range=True)\n if probe_type in ('IR', 'all'):\n subset = beta_df[(beta_df['probe_type'] == 'I') & (beta_df['Color_Channel'] == 'Red')]\n subset = subset.drop('probe_type', axis='columns')\n subset = subset.drop('Color_Channel', axis='columns')\n if return_fig:\n figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I Red (IR) probes', return_fig=True, silent=silent, full_range=True) )\n else:\n print(f'Found {subset.shape[0]} type I Red (IR) probes.')\n methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I Red (IR) probes', silent=silent, full_range=True)\n if probe_type in ('IG', 'all'):\n subset = beta_df[(beta_df['probe_type'] == 'I') & (beta_df['Color_Channel'] == 'Grn')]\n subset = subset.drop('probe_type', axis='columns')\n subset = subset.drop('Color_Channel', axis='columns')\n if return_fig:\n figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I Green (IG) probes', return_fig=True, silent=silent, full_range=True) )\n else:\n print(f'Found {subset.shape[0]} type I Green (IG) probes.')\n methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} type I Green (IG) probes', silent=silent, full_range=True)\n if str(array_type) != 'mouse':\n if return_fig:\n return figs\n return\n\n ############ MOUSE ONLY ################\n # TODO: control probe types #\n # 'probe_type' are I, II, IR, IG and probe_type (mouse only) are 'cg','ch','uk'. 
| 'rs' are in controls\n # mouse_probe_types are 'ch','cg','rs','uk'\n mapper = pd.DataFrame(data=manifest.data_frame.index.str[:2], index=manifest.data_frame.index)\n mapper = mapper.rename(columns={'IlmnID':'mouse_probe_type'})\n beta_df = beta_df.merge(mapper, right_index=True, left_index=True)\n\n if probe_type in mouse_probe_types:\n subset = beta_df[beta_df['mouse_probe_type'] == probe_type]\n subset = subset.drop(columns=['probe_type','Color_Channel','mouse_probe_type'])\n if return_fig:\n figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} {probe_type} probes', return_fig=True, silent=silent, full_range=True) )\n else:\n methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} {probe_type} probes', silent=silent, full_range=True)\n if probe_type == 'all':\n for mouse_probe_type in mouse_probe_types:\n subset = beta_df[beta_df['mouse_probe_type'] == mouse_probe_type]\n subset = subset.drop(columns=['probe_type','Color_Channel','mouse_probe_type'])\n if subset.shape[0] == 0:\n if not silent:\n LOGGER.warning(\"No {mouse_probe_type} probes found\")\n if return_fig:\n figs.append( methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} {mouse_probe_type} probes', return_fig=True, silent=silent, full_range=True) )\n else:\n methylcheck.beta_density_plot(subset, plot_title=f'{subset.shape[0]} {mouse_probe_type} probes', silent=silent, full_range=True)\n\n if return_fig:\n return figs\n plt.show()\n plt.close('all')\n\n\ndef plot_controls(path=None, subset='all', return_fig=False):\n \"\"\"internal array QC controls (available with the `--save_control` or `--all` methylprep process option)\n\n\ninput:\n======\n path\n can either be a path to the file, or a path to the folder containing a file called 'control_probes.pkl',\n or it can be the dictionary of control dataframes in `control_probes.pkl`.\n\noptions:\n========\n subset ('staining' | 'negative' | 'hybridization' | 'extension' | 'bisulfite' |\n 'non-polymorphic' | 'target-removal' | 'specificity' | 'all'):\n 'all' will plot every control function (default)\n\n return_fig (False)\n if True, returns a list of matplotlib.pyplot figure objects INSTEAD of showing then. 
Used in QC ReportPDF.\n\n if there are more than 30 samples, plots will not have sample names on x-axis.\n \"\"\"\n subset_options = {'staining', 'negative', 'hybridization', 'extension', 'bisulfite', 'non-polymorphic', 'target-removal', 'specificity', 'all'}\n if subset not in subset_options:\n raise ValueError(f\"Choose one of these options for plot type: {subset_options}\")\n if not path:\n print(\"You must specify a path to the control probes processed data file or folder (available with the `--save_control` methylprep process option).\")\n return\n try:\n # detect a dict of dataframes (control_probes.pkl) object\n if type(path) is dict and all([type(df) is type(pd.DataFrame()) for df in path.values()]):\n control = path\n path = None\n else:\n path = Path(path)\n if path.is_dir():\n control = pd.read_pickle(Path(path, 'control_probes.pkl'))\n elif path.is_file():\n control = pd.read_pickle(path) # allows for any arbitrary filename to be used, so long as structure is same, and it is a pickle.\n except Exception as e: # cannot unpack NoneType\n print(e)\n print(\"No data.\")\n return\n\n mouse = True if list(control.values())[0].shape[0] == 473 else False # vs 694 controls for epic.\n plotx = 'show' if len(list(control.keys())) <= 30 else None\n # Create empty dataframes for red and green negative controls\n control_R = pd.DataFrame(list(control.values())[0][['Control_Type','Color','Extended_Type']])\n control_G = pd.DataFrame(list(control.values())[0][['Control_Type','Color','Extended_Type']])\n # convert the list of DFs into one DF for each red and green channel\n for sample,c in control.items():\n # drop SNPS from control DF using Control_Type column.\n c = c[c['Control_Type'].notna() == True]\n df_red = c[['Extended_Type','Mean_Value_Red']].rename(columns={'Mean_Value_Red':sample})\n df_green = c[['Extended_Type','Mean_Value_Green']].rename(columns={'Mean_Value_Green':sample})\n control_R = pd.merge(left=control_R,right=df_red,on=['Extended_Type'])\n control_G = pd.merge(left=control_G,right=df_green,on=['Extended_Type'])\n\n figs = []\n if subset in ('staining','all'):\n stain_red = control_R[control_R['Control_Type']=='STAINING'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n stain_green = control_G[control_G['Control_Type']=='STAINING'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n color_dict = dict(zip(stain_green.Extended_Type, stain_green.Color))\n color_dict.update({k: (v if v != '-99' else 'gold') for k,v in color_dict.items()})\n stain_green = stain_green.drop(columns=['Color']).set_index('Extended_Type')\n stain_red = stain_red.drop(columns=['Color']).set_index('Extended_Type')\n stain_red = stain_red.T\n stain_green = stain_green.T\n if stain_red.shape[1] == 0 or stain_green.shape[1] == 0:\n LOGGER.info(\"No staining probes found\")\n else:\n fig = _qc_plotter(stain_red, stain_green, color_dict, xticks=plotx, ymax=60000, title='Staining', return_fig=return_fig)\n if fig:\n figs.append(fig)\n\n if subset in ('negative','all'):\n if mouse:\n # mouse manifest defines control probes in TWO columns, just to be annoying.\n neg_red = control_R[(control_R['Control_Type'] == 'NEGATIVE') & (control_R['Extended_Type'].str.startswith('neg_'))].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n neg_green = control_G[(control_G['Control_Type'] == 'NEGATIVE') & (control_G['Extended_Type'].str.startswith('neg_'))].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n neg_mouse_probe_names = list(neg_red.Extended_Type.values)\n else:\n 
neg_red = control_R[control_R['Control_Type']=='NEGATIVE'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n neg_green = control_G[control_G['Control_Type']=='NEGATIVE'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n color_dict = dict(zip(neg_green.Extended_Type, neg_green.Color))\n color_dict.update({k: (v if v != '-99' else 'Black') for k,v in color_dict.items()})\n neg_green = neg_green.drop(columns=['Color']).set_index('Extended_Type')\n neg_red = neg_red.drop(columns=['Color']).set_index('Extended_Type')\n neg_red = neg_red.T\n neg_green = neg_green.T\n # note: GenomeStudio appears to only do the first 16 negative control probes\n # Maybe user should be able to select which they want to see\n # There is a total of 600, which is too many to plot at once\n list_of_negative_controls_to_plot = ['Negative 1','Negative 2','Negative 3','Negative 4','Negative 5',\n 'Negative 6','Negative 7','Negative 8','Negative 9','Negative 10',\n 'Negative 11','Negative 12','Negative 13','Negative 14','Negative 15',\n 'Negative 16']\n # UPDATE: picking a smattering of probes that are in both EPIC and EPIC+\n list_of_negative_controls_to_plot = ['Negative 1','Negative 142','Negative 3','Negative 4','Negative 5',\n 'Negative 6','Negative 7','Negative 8','Negative 119','Negative 10',\n 'Negative 484','Negative 12','Negative 13','Negative 144','Negative 151',\n 'Negative 166']\n probes_to_plot = list_of_negative_controls_to_plot\n if mouse:\n probes_to_plot = neg_mouse_probe_names[:36] # plot the first 36\n dynamic_controls = [c for c in probes_to_plot if c in neg_red.columns and c in neg_green.columns]\n dynamic_ymax = max([max(neg_red[dynamic_controls].max(axis=0)), max(neg_green[dynamic_controls].max(axis=0))])\n dynamic_ymax = dynamic_ymax + int(0.1*dynamic_ymax)\n fig = _qc_plotter(neg_red, neg_green, color_dict, columns=probes_to_plot, ymax=dynamic_ymax, xticks=plotx, title='Negative', return_fig=return_fig)\n if fig:\n figs.append(fig)\n\n if subset in ('hybridization','all'):\n hyb_red = control_R[control_R['Control_Type']=='HYBRIDIZATION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n hyb_green = control_G[control_G['Control_Type']=='HYBRIDIZATION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n color_dict = dict(zip(hyb_green.Extended_Type, hyb_green.Color))\n hyb_green = hyb_green.drop(columns=['Color']).set_index('Extended_Type')\n hyb_red = hyb_red.drop(columns=['Color']).set_index('Extended_Type')\n hyb_red = hyb_red.T\n hyb_green = hyb_green.T\n fig = _qc_plotter(hyb_red, hyb_green, color_dict, ymax=35000, xticks=plotx, title='Hybridization', return_fig=return_fig)\n if fig:\n figs.append(fig)\n\n if subset in ('extension','all'):\n ext_red = control_R[control_R['Control_Type']=='EXTENSION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n ext_green = control_G[control_G['Control_Type']=='EXTENSION'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n color_dict = dict(zip(ext_green.Extended_Type, ext_green.Color))\n ext_green = ext_green.drop(columns=['Color']).set_index('Extended_Type')\n ext_red = ext_red.drop(columns=['Color']).set_index('Extended_Type')\n ext_red = ext_red.T\n ext_green = ext_green.T\n if ext_red.shape[1] == 0 or ext_green.shape[1] == 0:\n LOGGER.info(\"No extension probes found\")\n else:\n fig = _qc_plotter(ext_red, ext_green, color_dict, ymax=50000, xticks=plotx, title='Extension', return_fig=return_fig)\n if fig:\n figs.append(fig)\n\n if subset in ('bisulfite','all'):\n bci_red = 
control_R[control_R['Control_Type'].isin(['BISULFITE CONVERSION I','BISULFITE CONVERSION II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n bci_green = control_G[control_G['Control_Type'].isin(['BISULFITE CONVERSION I','BISULFITE CONVERSION II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n color_dict = dict(zip(bci_green.Extended_Type, bci_green.Color))\n color_dict.update({k: (v if v != 'Both' else 'seagreen') for k,v in color_dict.items()}) # mouse has Both; others don't\n bci_green = bci_green.drop(columns=['Color']).set_index('Extended_Type')\n bci_red = bci_red.drop(columns=['Color']).set_index('Extended_Type')\n bci_red = bci_red.T\n bci_green = bci_green.T\n fig = _qc_plotter(bci_red, bci_green, color_dict, ymax=30000, xticks=plotx, title='Bisulfite Conversion', return_fig=return_fig)\n if fig:\n figs.append(fig)\n\n if subset in ('non-polymorphic','all'):\n np_red = control_R[control_R['Control_Type']=='NON-POLYMORPHIC'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n np_green = control_G[control_G['Control_Type']=='NON-POLYMORPHIC'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n color_dict = dict(zip(np_green.Extended_Type, np_green.Color))\n color_dict.update({k: (v if v != '-99' else 'Black') for k,v in color_dict.items()})\n np_green = np_green.drop(columns=['Color']).set_index('Extended_Type')\n np_red = np_red.drop(columns=['Color']).set_index('Extended_Type')\n np_red = np_red.T\n np_green = np_green.T\n if np_red.shape[1] == 0 or np_green.shape[1] == 0:\n LOGGER.info(\"No non-polymorphic probes found\")\n else:\n fig = _qc_plotter(np_red, np_green, color_dict, ymax=30000, xticks=plotx, title='Non-polymorphic', return_fig=return_fig)\n if fig:\n figs.append(fig)\n\n if subset in ('target-removal','all'):\n tar_red = control_R[control_R['Control_Type']=='TARGET REMOVAL'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n tar_green = control_G[control_G['Control_Type']=='TARGET REMOVAL'].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n color_dict = dict(zip(tar_green.Extended_Type, tar_green.Color))\n tar_green = tar_green.drop(columns=['Color']).set_index('Extended_Type')\n tar_red = tar_red.drop(columns=['Color']).set_index('Extended_Type')\n tar_red = tar_red.T\n tar_green = tar_green.T\n if tar_red.shape[1] == 0 or tar_green.shape[1] == 0:\n LOGGER.info(\"No target-removal probes found\")\n else:\n fig = _qc_plotter(tar_red, tar_green, color_dict, ymax=2000, xticks=plotx, title='Target Removal', return_fig=return_fig)\n if fig:\n figs.append(fig)\n\n if subset in ('specificity','all'):\n spec_red = control_R[control_R['Control_Type'].isin(['SPECIFICITY I','SPECIFICITY II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n spec_green = control_G[control_G['Control_Type'].isin(['SPECIFICITY I','SPECIFICITY II'])].copy().drop(columns=['Control_Type']).reset_index(drop=True)\n color_dict = dict(zip(spec_green.Extended_Type, spec_green.Color))\n spec_green = spec_green.drop(columns=['Color']).set_index('Extended_Type')\n spec_red = spec_red.drop(columns=['Color']).set_index('Extended_Type')\n spec_red = spec_red.T\n spec_green = spec_green.T\n fig = _qc_plotter(spec_red, spec_green, color_dict, ymax=30000, xticks=plotx, title='Specificity (Type I)', return_fig=return_fig)\n if fig:\n figs.append(fig)\n\n if return_fig and figs != []:\n return figs\n plt.show()\n plt.close('all')\n\n\ndef _qc_plotter(stain_red, stain_green, color_dict=None, columns=None, ymax=None, 
xticks='show',\n title='', return_fig=False):\n \"\"\" draft generic plotting function for all the control intensity QC plots.\n used by plot_staining_controls()\n\noptions:\n========\n required: stain_red and stain_green\n contains: red/green values in columns and probe characteristics in rows (transposed from control_probes.pkl format).\n color_dict\n {value: color-code} dictionary passed in to define which color to make each value in the index.\n ymax\n if defined, constrains the plot y-max values. Used to standardize view of each probe type within normal ranges.\n any probe values that fall outside this range generate warnings.\n columns\n list of columns(probes) in stain_red and stain_green to plot (if ommitted it plots everything).\n return_fig (False)\n if True, returns the figure object instead of showing plot\n\ntodo:\n=====\n add a batch option that splits large datasets into multiple charts, so labels are readable on x-axis.\n currently: if N>30, it suppresses the X-axis sample labels, which would be unreadable\n \"\"\"\n fig, (ax1,ax2) = plt.subplots(nrows=1,ncols=2,figsize=(10,8)) # was (12,10)\n plt.tight_layout(w_pad=15)\n plt.setp(ax1.xaxis.get_majorticklabels(), rotation=90, fontsize='small')\n plt.setp(ax2.xaxis.get_majorticklabels(), rotation=90, fontsize='small')\n ax1.grid(axis='both', linestyle='dotted')\n ax2.grid(axis='both', linestyle='dotted')\n title = title + ' ' if title != '' else title\n ax1.set_title(f'{title}Green')\n ax2.set_title(f'{title}Red')\n if color_dict is None:\n color_dict = {}\n\n # DEBUG: control probes contain '-99 in the Color column. Breaks plot.' But resolved by plot_controls() now.\n if '-99' in color_dict.values():\n missing_colors = {k:v for k,v in color_dict.items() if v == '-99'}\n LOGGER.warning(f\"{title} has invalid colors: {missing_colors}\")\n color_dict.update({k:'Black' for k,v in missing_colors.items()})\n\n if columns != None:\n # TODO: ensure all columns in list are in stain_red/green first.\n # failed with Barnes idats_part3 missing some probes\n if (set(columns) - set(stain_red.columns) != set() or\n set(columns) - set(stain_green.columns) != set()):\n cols_removed = [c for c in columns if c not in stain_red or c not in stain_green]\n columns = [c for c in columns if c in stain_red and c in stain_green]\n LOGGER.warning(f'These probes were expected but missing from the {title}data: ({\", \".join(cols_removed)})')\n stain_red = stain_red.loc[:, columns]\n stain_green = stain_green.loc[:, columns]\n for c in stain_red.columns:\n if ymax is not None and (stain_red[c] > ymax).any():\n LOGGER.warning(f'Some Red {c} values exceed chart maximum and are not shown.')\n if ymax is not None and (stain_green[c] > ymax).any():\n LOGGER.warning(f'Some Green {c} values exceed chart maximum and are not shown.')\n ax1.plot(stain_green.index,\n c,\n data=stain_green, label=c,\n color=color_dict[c], linewidth=0, marker='o')\n\n ax2.plot(stain_red.index,\n c,\n data=stain_red, label=c,\n color=color_dict[c], linewidth=0, marker='o')\n\n ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize='medium')\n ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize='medium')\n\n if ymax != None:\n ax1.set_ylim([0,ymax])\n ax2.set_ylim([0,ymax])\n if xticks != 'show':\n #plt.xticks([]) # hide\n ax1.get_xaxis().set_visible(False)\n ax2.get_xaxis().set_visible(False)\n if return_fig:\n return fig\n plt.show()\n plt.close('all')\n\n\ndef bis_conversion_control(path_or_df, use_median=False, on_lambda=False, verbose=False):\n \"\"\" GCT 
score: requires path to noob_meth or raw meth_values.pkl; or you can pass in a meth dataframe.\n use_median: not supported yet. Always uses mean of probe values \"\"\"\n found_meth = False\n try:\n if isinstance(path_or_df, pd.DataFrame):\n meth = path_or_df\n found_meth = True\n else:\n path = Path(path_or_df)\n if path.is_dir() and Path(path, 'meth_values.pkl').is_file():\n meth = pd.read_pickle(Path(path, 'meth_values.pkl'))\n found_meth = True\n if path.is_dir() and Path(path, 'noob_meth_values.pkl').is_file() and not found_meth:\n meth = pd.read_pickle(Path(path, 'noob_meth_values.pkl'))\n found_meth = True\n except Exception as e: # cannot unpack NoneType\n print(e)\n print(\"No data.\")\n return {}\n if not found_meth:\n raise FileNotFoundError(\"this requires methylated intensities in a pickle file.\")\n # using the number of probes in meth df to determine array\n array_type, man_filepath = methylcheck.detect_array(meth, returns='filepath', on_lambda=on_lambda)\n try:\n from methylprep import Manifest, ArrayType\n except ImportError:\n raise ImportError(\"this function requires methylprep\")\n if Path.exists(man_filepath):\n LOGGER.setLevel(logging.WARNING)\n manifest = Manifest(ArrayType(array_type), man_filepath, on_lambda=on_lambda)\n LOGGER.setLevel(logging.INFO)\n else:\n # initialize and force download with filepath=None\n LOGGER.setLevel(logging.WARNING)\n manifest = Manifest(ArrayType(array_type), filepath_or_buffer=None, on_lambda=on_lambda)\n LOGGER.setLevel(logging.INFO)\n\n # want meth channel data; 89203 probes\n oobG_mask = set(manifest.data_frame[(manifest.data_frame['Infinium_Design_Type'] == 'I') & (manifest.data_frame['Color_Channel'] == 'Red')].index)\n if str(array_type) == 'epic+':\n array_type = 'epic' #file match below\n # 'epic' should suffice for this test, except that probe names won't match\n oobG_mask = set([probe.split('_')[0] for probe in oobG_mask]) # these probe names have extra crap on end\n meth = meth.rename(index=lambda x: x.split('_')[0])\n\n try:\n from importlib import resources # py3.7+\n except ImportError:\n import pkg_resources\n pkg_namespace = 'methylcheck.data_files'\n try:\n with resources.path(pkg_namespace, f'{array_type}_extC.csv') as probe_filepath:\n ext_C_probes = pd.read_csv(probe_filepath)\n ext_C_probes = ext_C_probes['x'].values # simple, flat list of probe cgXXX names\n with resources.path(pkg_namespace, f'{array_type}_extT.csv') as probe_filepath:\n ext_T_probes = pd.read_csv(probe_filepath)\n ext_T_probes = ext_T_probes['x'].values\n except:\n probe_filepath = pkg_resources.resource_filename(pkg_namespace, f'{array_type}_extC.csv')\n ext_C_probes = pd.read_csv(probe_filepath)\n ext_C_probes = ext_C_probes['x'].values # simple, flat list of probe cgXXX names\n probe_filepath = pkg_resources.resource_filename(pkg_namespace, f'{array_type}_extT.csv')\n ext_T_probes = pd.read_csv(probe_filepath)\n ext_T_probes = ext_T_probes['x'].values\n ext_C = set(ext_C_probes).intersection(oobG_mask)\n ext_T = set(ext_T_probes).intersection(oobG_mask)\n # GCT: mean (C) / mean (T), after removing NaNs\n # TEST bis_conversion_control('/Volumes/LEGX/GSE69852/idats_2021_04_12')\n table = {} # keys are sentrix_ids; values are GCT scores\n for sample in meth.columns:\n C_mask = meth[sample].index.isin(ext_C)\n C_mean = meth[sample].loc[C_mask].mean() # excludes NAN by default\n T_mask = meth[sample].index.isin(ext_T)\n T_mean = meth[sample].loc[T_mask].mean()\n if verbose:\n LOGGER.info(f\"{sample}: ({int(round(C_mean))} / {int(round(T_mean))}) 
= GCT {round(100*C_mean/T_mean, 1)}\")\n table[sample] = round(100*C_mean/T_mean, 1)\n return table\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.merge", "matplotlib.pyplot.tight_layout", "pandas.read_csv", "pandas.concat", "matplotlib.pyplot.title", "numpy.linspace", "pandas.read_pickle", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
co-develop-drv/FGVC
[ "9820d3c1a33ba402009ecb1d25e897cbcddc74d5" ]
[ "edgeconnecttest/models.py" ]
[ "import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom .networks import InpaintGenerator, EdgeGenerator, Discriminator\nfrom .loss import AdversarialLoss, PerceptualLoss, StyleLoss, TotalVariationalLoss\n\n\nclass BaseModel(nn.Module):\n def __init__(self, name, config):\n super(BaseModel, self).__init__()\n\n self.name = name\n self.config = config\n self.iteration = 0\n\n self.gen_weights_path = os.path.join(config.PATH, name + '_gen.pth')\n self.dis_weights_path = os.path.join(config.PATH, name + '_dis.pth')\n\n def load(self):\n if os.path.exists(self.gen_weights_path):\n print('Loading %s generator...' % self.name)\n\n if torch.cuda.is_available():\n data = torch.load(self.gen_weights_path)\n else:\n data = torch.load(self.gen_weights_path, map_location=lambda storage, loc: storage)\n\n self.generator.load_state_dict(data['generator'])\n self.iteration = data['iteration']\n\n # load discriminator only when training\n if self.config.MODE == 1 and os.path.exists(self.dis_weights_path):\n print('Loading %s discriminator...' % self.name)\n\n if torch.cuda.is_available():\n data = torch.load(self.dis_weights_path)\n else:\n data = torch.load(self.dis_weights_path, map_location=lambda storage, loc: storage)\n\n self.discriminator.load_state_dict(data['discriminator'])\n\n def save(self):\n print('\\nsaving %s...\\n' % self.name)\n torch.save({\n 'iteration': self.iteration,\n 'generator': self.generator.state_dict()\n }, self.gen_weights_path)\n\n torch.save({\n 'discriminator': self.discriminator.state_dict()\n }, self.dis_weights_path)\n\n\nclass EdgeModel(BaseModel):\n def __init__(self, config):\n super(EdgeModel, self).__init__('EdgeModel', config)\n\n # generator input: [grayscale(1) + edge(1) + mask(1)]\n # discriminator input: (grayscale(1) + edge(1))\n generator = EdgeGenerator(use_spectral_norm=True)\n discriminator = Discriminator(in_channels=2, use_sigmoid=config.GAN_LOSS != 'hinge')\n if len(config.GPU) > 1:\n generator = nn.DataParallel(generator, config.GPU)\n discriminator = nn.DataParallel(discriminator, config.GPU)\n l1_loss = nn.L1Loss()\n adversarial_loss = AdversarialLoss(type=config.GAN_LOSS)\n\n self.add_module('generator', generator)\n self.add_module('discriminator', discriminator)\n\n self.add_module('l1_loss', l1_loss)\n self.add_module('adversarial_loss', adversarial_loss)\n\n self.gen_optimizer = optim.Adam(\n params=generator.parameters(),\n lr=float(config.LR),\n betas=(config.BETA1, config.BETA2)\n )\n\n self.dis_optimizer = optim.Adam(\n params=discriminator.parameters(),\n lr=float(config.LR) * float(config.D2G_LR),\n betas=(config.BETA1, config.BETA2)\n )\n\n def process(self, images, edges, masks):\n self.iteration += 1\n\n\n # zero optimizers\n self.gen_optimizer.zero_grad()\n self.dis_optimizer.zero_grad()\n\n\n # process outputs\n outputs = self(images, edges, masks)\n gen_loss = 0\n dis_loss = 0\n\n\n # discriminator loss\n dis_input_real = torch.cat((images, edges), dim=1)\n dis_input_fake = torch.cat((images, outputs.detach()), dim=1)\n dis_real, dis_real_feat = self.discriminator(dis_input_real) # in: (grayscale(1) + edge(1))\n dis_fake, dis_fake_feat = self.discriminator(dis_input_fake) # in: (grayscale(1) + edge(1))\n dis_real_loss = self.adversarial_loss(dis_real, True, True)\n dis_fake_loss = self.adversarial_loss(dis_fake, False, True)\n dis_loss += (dis_real_loss + dis_fake_loss) / 2\n\n\n # generator adversarial loss\n gen_input_fake = torch.cat((images, outputs), dim=1)\n gen_fake, gen_fake_feat = 
self.discriminator(gen_input_fake) # in: (grayscale(1) + edge(1))\n gen_gan_loss = self.adversarial_loss(gen_fake, True, False)\n gen_loss += gen_gan_loss\n\n\n # generator feature matching loss\n gen_fm_loss = 0\n for i in range(len(dis_real_feat)):\n gen_fm_loss += self.l1_loss(gen_fake_feat[i], dis_real_feat[i].detach())\n gen_fm_loss = gen_fm_loss * self.config.FM_LOSS_WEIGHT\n gen_loss += gen_fm_loss\n\n\n # create logs\n logs = [\n (\"l_d1\", dis_loss.item()),\n (\"l_g1\", gen_gan_loss.item()),\n (\"l_fm\", gen_fm_loss.item()),\n ]\n\n return outputs, gen_loss, dis_loss, logs\n\n def forward(self, images, edges, masks):\n edges_masked = (edges * (1 - masks))\n images_masked = (images * (1 - masks)) + masks\n inputs = torch.cat((images_masked, edges_masked, masks), dim=1)\n outputs = self.generator(inputs) # in: [grayscale(1) + edge(1) + mask(1)]\n return outputs\n\n def backward(self, gen_loss=None, dis_loss=None):\n if dis_loss is not None:\n dis_loss.backward()\n self.dis_optimizer.step()\n\n if gen_loss is not None:\n gen_loss.backward()\n self.gen_optimizer.step()\n\n\nclass InpaintingModel(BaseModel):\n def __init__(self, config):\n super(InpaintingModel, self).__init__('InpaintingModel', config)\n\n # generator input: [rgb(3) + edge(1)]\n # discriminator input: [rgb(3)]\n generator = InpaintGenerator(config)\n self.config = config\n if config.FLO == 1:\n in_channels = 2\n elif config.FLO == 0:\n in_channels = 3\n else:\n assert(0)\n discriminator = Discriminator(in_channels=in_channels, use_sigmoid=config.GAN_LOSS != 'hinge')\n if len(config.GPU) > 1:\n generator = nn.DataParallel(generator, config.GPU)\n discriminator = nn.DataParallel(discriminator , config.GPU)\n\n l1_loss = nn.L1Loss()\n tv_loss = TotalVariationalLoss()\n perceptual_loss = PerceptualLoss()\n style_loss = StyleLoss()\n adversarial_loss = AdversarialLoss(type=config.GAN_LOSS)\n\n self.add_module('generator', generator)\n self.add_module('discriminator', discriminator)\n\n self.add_module('l1_loss', l1_loss)\n self.add_module('tv_loss', tv_loss)\n self.add_module('perceptual_loss', perceptual_loss)\n self.add_module('style_loss', style_loss)\n self.add_module('adversarial_loss', adversarial_loss)\n\n self.gen_optimizer = optim.Adam(\n params=generator.parameters(),\n lr=float(config.LR),\n betas=(config.BETA1, config.BETA2)\n )\n\n self.dis_optimizer = optim.Adam(\n params=discriminator.parameters(),\n lr=float(config.LR) * float(config.D2G_LR),\n betas=(config.BETA1, config.BETA2)\n )\n\n def process(self, images, images_filled, edges, masks):\n self.iteration += 1\n\n # zero optimizers\n self.gen_optimizer.zero_grad()\n self.dis_optimizer.zero_grad()\n\n # process outputs\n outputs = self(images, images_filled, edges, masks)\n\n gen_loss = 0\n dis_loss = 0\n gen_gan_loss = 0\n\n if self.config.GAN == 1:\n # discriminator loss\n dis_input_real = images\n dis_input_fake = outputs.detach()\n dis_real, _ = self.discriminator(dis_input_real) # in: [rgb(3)]\n dis_fake, _ = self.discriminator(dis_input_fake) # in: [rgb(3)]\n dis_real_loss = self.adversarial_loss(dis_real, True, True)\n dis_fake_loss = self.adversarial_loss(dis_fake, False, True)\n dis_loss += (dis_real_loss + dis_fake_loss) / 2\n\n\n # generator adversarial loss\n gen_input_fake = outputs\n gen_fake, _ = self.discriminator(gen_input_fake) # in: [rgb(3)]\n gen_gan_loss = self.adversarial_loss(gen_fake, True, False) * self.config.INPAINT_ADV_LOSS_WEIGHT\n gen_loss += gen_gan_loss\n\n\n # generator l1 loss\n gen_l1_loss = self.l1_loss(outputs, images) 
* self.config.L1_LOSS_WEIGHT / torch.mean(masks)\n gen_loss += gen_l1_loss\n\n if self.config.ENFORCE == 1:\n gen_l1_masked_loss = self.l1_loss(outputs * masks, images * masks) * 10 * self.config.L1_LOSS_WEIGHT\n gen_loss += gen_l1_masked_loss\n elif self.config.ENFORCE != 0:\n assert(0)\n\n if self.config.TV == 1:\n # generator tv loss\n gen_tv_loss = self.tv_loss(outputs) * self.config.TV_LOSS_WEIGHT\n gen_loss += gen_tv_loss\n\n if self.config.FLO != 1:\n # generator perceptual loss\n gen_content_loss = self.perceptual_loss(outputs, images)\n gen_content_loss = gen_content_loss * self.config.CONTENT_LOSS_WEIGHT\n gen_loss += gen_content_loss\n\n # generator style loss\n gen_style_loss = self.style_loss(outputs * masks, images * masks)\n gen_style_loss = gen_style_loss * self.config.STYLE_LOSS_WEIGHT\n gen_loss += gen_style_loss\n\n # create logs\n logs = [\n (\"l_d2\", dis_loss.item()),\n (\"l_g2\", gen_gan_loss.item()),\n (\"l_l1\", gen_l1_loss.item()),\n (\"l_per\", gen_content_loss.item()),\n (\"l_sty\", gen_style_loss.item()),\n ]\n else:\n logs = []\n logs.append((\"l_l1\", gen_l1_loss.item()))\n logs.append((\"l_gen\", gen_loss.item()))\n\n if self.config.GAN == 1:\n logs.append((\"l_d2\", dis_loss.item()))\n logs.append((\"l_g2\", gen_gan_loss.item()))\n\n if self.config.TV == 1:\n logs.append((\"l_tv\", gen_tv_loss.item()))\n\n if self.config.ENFORCE == 1:\n logs.append((\"l_masked_l1\", gen_l1_masked_loss.item()))\n\n return outputs, gen_loss, dis_loss, logs\n\n def forward(self, images, images_filled, edges, masks):\n\n if self.config.FILL == 1:\n images_masked = images_filled\n elif self.config.FILL == 0:\n images_masked = (images * (1 - masks).float()) # + masks\n else:\n assert(0)\n\n if self.config.PASSMASK == 1:\n inputs = torch.cat((images_masked, edges, masks), dim=1)\n elif self.config.PASSMASK == 0:\n inputs = torch.cat((images_masked, edges), dim=1)\n else:\n assert(0)\n\n outputs = self.generator(inputs)\n # if self.config.RESIDUAL == 1:\n # assert(self.config.PASSMASK == 1)\n # outputs = self.generator(inputs) + images_filled\n # elif self.config.RESIDUAL == 0:\n # outputs = self.generator(inputs)\n # else:\n # assert(0)\n\n return outputs\n\n def backward(self, gen_loss=None, dis_loss=None):\n\n if self.config.GAN == 1:\n dis_loss.backward()\n self.dis_optimizer.step()\n\n gen_loss.backward()\n self.gen_optimizer.step()\n" ]
[ [ "torch.mean", "torch.cat", "torch.load", "torch.cuda.is_available", "torch.nn.DataParallel", "torch.nn.L1Loss" ] ]
AuckeBos/MLiPPaA
[ "4b6c563f93e1eb7fc90f66a9a6ada16c07664d71" ]
[ "Element3/read_and_run.py" ]
[ "import argparse\nimport csv\n\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.models import load_model\n\nimport Element2.Evaluator\nfrom Element2.BaseClassification import BaseClassifier\n\n# As computed by the training data distribution (RebalanceTrainVal=False)\nmulti_train_prior = np.array([.5, .125, .125, .125, .125])\nbinary_train_prior = np.array([.5, .5])\n\nmulti_test_prior = np.array([.04, .02, .19, .51, .24])\nbinary_test_prior = np.array([.96, .04])\n\n\ndef read():\n \"\"\"\n Read command line arguments for the script:\n - --data-file: The data file with the data to test. If not provided, use ExamData.csv in /data\n - --classification-type: Classify binary or multiclass\n - --model: Which type of model to use: The BinaryClassifier, MultiClassifier, or RecurrentClassifier\n - --h5: The h5 file of the pretrained model, should match with --model\n - --bayes: Apply bayes on the predictions\n @return:\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Load a model, test them on a test dataset; save predictions to csv',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('-d', '--data-file', type=str, required=False, default='../Element2/data/ExamData2.csv', help='The datafile containing the test data')\n parser.add_argument('-t', '--classification-type', type=str, required=True, choices=['binary', 'multi'], help='Classification type: multi label or binary')\n parser.add_argument('-m', '--model', type=str, required=True, choices=['binary', 'multi', 'recurrent'], help='Classification model: BinaryClassifier, MultiClassifier, or RecurrentClassifier')\n parser.add_argument('-h5', '--h5', type=str, required=True, help='The h5 file of the saved model')\n parser.add_argument('-b', '--bayes', type=str, required=True, choices=['True', 'False'], help='Apply bayes to the prediction outputs?')\n args = parser.parse_args()\n return args.data_file, args.classification_type, args.model, args.h5, bool(args.bayes)\n\n\ndef run(data_file: str, classification_type: str, model_type: str, h5: str, apply_bayes: bool):\n \"\"\"\n After commandline args have been read, run the model:\n - Load the classifier\n - Load the data\n - Predict the data\n - Generate csv in the desired format (predictions.csv)\n @param data_file: The file that contains the testset\n @param classification_type: The type of classification: binary or multi\n @param model_type: The classifier type: binary, multi, recurrent\n @param h5: The h5 file of the trained model\n @param apply_bayes: Bool that indicates whether to apply bayes on the predictions\n \"\"\"\n classifier = Element2.Evaluator.Evaluator.parse_classifier_type(model_type)\n classifier.apply_bayes = apply_bayes\n if model_type == 'binary':\n classifier.train_prior = binary_train_prior\n classifier.test_prior = binary_test_prior\n else: # Multi or recurrent\n classifier.train_prior = multi_train_prior\n classifier.test_prior = multi_test_prior\n net = load_model(h5, custom_objects={'f1': BaseClassifier.f1, 'loss': classifier.loss()})\n\n # Use manual label mapping for multi classifier:\n predictions_to_labels = ['4top', 'ttbar', 'ttbarHiggs', 'ttbarW', 'ttbarZ']\n # Define the number of objects per row. Needed because we need to have the exact same input shape as during training, otherwise\n # The network won't be able to predict. 
Note that this does not decrease performance, since the network will mask them out\n objects_per_row = 19\n ids = pd.read_csv(data_file, delimiter=';', usecols=[0], names=['EventID'])['EventID'].tolist()\n x, _ = classifier.load_data(data_file, False, objects_per_row)\n predictions = classifier.predict(net, x)\n with open('predictions.csv', 'w') as file:\n writer = csv.writer(file, delimiter=',')\n for (prediction, id) in zip(predictions, ids):\n # Prefix with labels\n if classification_type == 'binary': # prediction[0] must be the probability of 4-top\n prediction = [f'4top={prediction[0]}']\n else: # multi: prediction is array of probs\n prediction = [f'{label}={value}' for (label, value) in zip(predictions_to_labels, prediction)]\n writer.writerow([int(id)] + prediction)\n\n\nif __name__ == '__main__':\n run(*read())\n" ]
[ [ "numpy.array", "pandas.read_csv" ] ]
sdpython/csharpyml
[ "f814af89c5b988924a7f31fe71ec6eb515292070" ]
[ "_unittests/ut_notebook/test_dynamic_cs.py" ]
[ "\"\"\"\n@brief test log(time=2s)\n\"\"\"\nimport sys\nimport os\nimport unittest\nfrom sklearn import datasets\nimport pandas\nfrom pyquickhelper.pycode import ExtTestCase, get_temp_folder\n\ntry:\n import src\nexcept ImportError:\n path = os.path.normpath(\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\")))\n if path not in sys.path:\n sys.path.append(path)\n import src\n\nfrom src.csharpyml.notebook.csmlmagics import CsMLMagics\n\n\nclass TestDynamicCS(ExtTestCase):\n \"\"\"Test dynamic compilation.\"\"\"\n\n _script = \"\"\"\n public class IrisObservation\n {\n [Column(\"0\")]\n [ColumnName(\"Label\")]\n public string Label;\n\n [Column(\"1\")]\n public float Sepal_length;\n\n [Column(\"2\")]\n public float Sepal_width;\n\n [Column(\"3\")]\n public float Petal_length;\n\n [Column(\"4\")]\n public float Petal_width;\n }\n\n public class IrisPrediction\n {\n public uint PredictedLabel;\n\n [VectorType(4)]\n public float[] Score;\n }\n\n public class TrainTestIris\n {\n string _dataset;\n PredictionFunction<IrisObservation, IrisPrediction> _fct;\n\n public TrainTestIris(string iris)\n {\n _dataset = iris;\n }\n\n public void Train()\n {\n using (var env = new ConsoleEnvironment(verbose:false))\n {\n var args = new TextLoader.Arguments()\n {\n Separator = \",\",\n HasHeader = true,\n Column = new TextLoader.Column[] {\n TextLoader.Column.Parse(\"Label:U4[0-2]:0\"),\n new TextLoader.Column(\"Sepal_length\", DataKind.R4, 1),\n new TextLoader.Column(\"Sepal_width\", DataKind.R4, 2),\n new TextLoader.Column(\"Petal_length\", DataKind.R4, 3),\n new TextLoader.Column(\"Petal_width\", DataKind.R4, 4),\n }\n };\n\n var reader = new TextLoader(env, args);\n var concat = new ColumnConcatenatingEstimator(env,\n \"Features\", \"Sepal_length\",\n \"Sepal_width\", \"Petal_length\", \"Petal_width\");\n var km = new MulticlassLogisticRegression(env, \"Label\", \"Features\");\n var pipeline = concat.Append(km);\n\n IDataView trainingDataView = reader.Read(new MultiFileSource(_dataset));\n var model = pipeline.Fit(trainingDataView);\n\n var obs = new IrisObservation()\n {\n Sepal_length = 3.3f,\n Sepal_width = 1.6f,\n Petal_length = 0.2f,\n Petal_width = 5.1f,\n };\n\n _fct = model.MakePredictionFunction<IrisObservation, IrisPrediction>(env);\n }\n }\n\n public IrisPrediction Predict(double sl, double sw, double pl, double pw)\n {\n var obs = new IrisObservation()\n {\n Sepal_length = (float)sl,\n Sepal_width = (float)sw,\n Petal_length = (float)pl,\n Petal_width = (float)pw,\n };\n return _fct.Predict(obs);\n }\n }\n\n public static TrainTestIris ReturnMLClass(string ds)\n {\n return new TrainTestIris(ds);\n }\n \"\"\"\n\n def test_src(self):\n \"skip pylint\"\n self.assertFalse(src is None)\n\n def test_magic_cs(self):\n cm = CsMLMagics()\n fct = cm.mlnet(\"ReturnMLClass\", TestDynamicCS._script)\n if fct is None:\n raise Exception(TestDynamicCS._script)\n\n temp = get_temp_folder(__file__, \"temp_nb_mlnet\")\n iris = datasets.load_iris()\n X = iris.data\n y = iris.target\n features = ['Slength', 'Swidth', 'Plength', 'Pwidth']\n df = pandas.DataFrame(X, columns=features)\n df[\"Label\"] = y\n df = df[[\"Label\"] + ['Slength', 'Swidth', 'Plength', 'Pwidth']]\n dest = os.path.join(temp, \"iris_data_id.txt\")\n df.to_csv(dest, sep=',', index=False)\n\n cl = fct(dest)\n cl.Train()\n res = cl.Predict(3.4, 5.4, 3.2, 5.6)\n label = res.PredictedLabel\n score = list(res.Score)\n self.assertEqual(label, 3)\n self.assertEqual(len(score), 3)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n" ]
[ [ "sklearn.datasets.load_iris", "pandas.DataFrame" ] ]
yashpatel5400/ARia
[ "1f9ad25f943f5b8859a80470715be8698863b2f8" ]
[ "detect_board.py" ]
[ "import numpy as np\nimport cv2\n\ndef rectify(h):\n if h.shape[0] * h.shape[1] != 8:\n return None\n\n h = h.reshape((4,2))\n hnew = np.zeros((4,2))\n\n add = h.sum(1)\n hnew[0] = h[np.argmin(add)]\n hnew[2] = h[np.argmax(add)]\n\n diff = np.diff(h,axis=1)\n hnew[1] = h[np.argmin(diff)]\n hnew[3] = h[np.argmax(diff)]\n\n return hnew\n\n\ndef get_corners(frame):\n\n imcopy = frame.copy()\n \n # Convert BGR to HSV\n hsv = cv2.cvtColor(imcopy, cv2.COLOR_BGR2HSV)\n # define range of orange color in HSV\n lower_orange = np.array([0,100,100])\n upper_orange = np.array([50,255,255])\n # Threshold the HSV image to get only orange colors\n mask = cv2.inRange(imcopy, lower_orange, upper_orange)\n imcopy = cv2.bitwise_and(imcopy,imcopy, mask=mask)\n\n # Get thresh into the correct cv2 readable format\n ret,thresh = cv2.threshold(imcopy, 0, 1, cv2.THRESH_BINARY)\n thresh = cv2.cvtColor(thresh, cv2.COLOR_RGB2GRAY)\n # Find all the contours in the image\n _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # Get the convex hull of all those contours\n convex_hulls = np.array(contours[:])\n # Find the area of all those convex hulls so we can take the largest\n contour_areas = [cv2.contourArea(c) for c in convex_hulls]\n # Get the indices of the 4 largest contours. \n largest_contour_idxes = np.array(contour_areas).argsort()[-4:][::-1]\n # Get the 4 largest convex hulls\n largest_convex_hulls = [convex_hulls[i] for i in largest_contour_idxes]\n # TODO: Ensure the convex hulls are a minimum area\n \n moments = [cv2.moments(c) for c in largest_convex_hulls]\n centers = [(int(m['m10']/m['m00']), int(m['m01']/m['m00'])) for m in moments if m['m00'] != 0]\n\n centers = np.array(centers)\n if centers.shape == (0,):\n return None\n\n centers = rectify(centers)\n return centers\n\ndef get_C_key(frame,corners):\n\n imcopy = frame.copy()\n\n # Convert BGR to HSV\n hsv = cv2.cvtColor(imcopy, cv2.COLOR_BGR2HSV)\n # define range of blue color in HSV\n lower_blue = np.array([150,0,0])\n upper_blue = np.array([255,100,100])\n # Threshold the HSV image to get only blue colors\n mask = cv2.inRange(imcopy, lower_blue, upper_blue)\n imcopy = cv2.bitwise_and(imcopy,imcopy, mask=mask)\n\n # Get thresh into the correct cv2 readable format\n ret,thresh = cv2.threshold(imcopy, 0, 1, cv2.THRESH_BINARY)\n thresh = cv2.cvtColor(thresh, cv2.COLOR_RGB2GRAY)\n # Find all the contours in the image\n _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # Get the convex hull of all those contours\n convex_hulls = np.array(contours)\n # Find the area of all those convex hulls so we can take the largest\n contour_areas = [cv2.contourArea(c) for c in convex_hulls]\n # Get the indices of the largest contours. 
\n largest_contour_idxes = np.array(contour_areas).argsort()[-1:][::-1]\n # Get the largest convex hull\n largest_convex_hulls = [convex_hulls[i] for i in largest_contour_idxes]\n # TODO: Ensure the convex hull are a minimum area\n\n # approximate the contour with a quadrangle\n if len(largest_convex_hulls) == 0:\n return None\n\n peri = cv2.arcLength(largest_convex_hulls[0],True)\n approx = cv2.approxPolyDP(largest_convex_hulls[0],0.02*peri,True)\n approx = rectify(approx)\n\n if approx is None:\n return None\n\n # get midpoints of corners\n left_mdpt = [(corners[0,0]+corners[3,0])/2,(corners[0,1]+corners[3,1])/2]\n right_mdpt = [(corners[1,0]+corners[2,0])/2,(corners[1,1]+corners[2,1])/2]\n top_mdpt = [(corners[0,0]+corners[1,0])/2,(corners[0,1]+corners[1,1])/2]\n bot_mdpt = [(corners[2,0]+corners[3,0])/2,(corners[2,1]+corners[3,1])/2]\n # get bounding coordinates\n board_left_x = left_mdpt[0]\n board_right_x = right_mdpt[0]\n board_top_y = top_mdpt[1]\n board_bot_y = bot_mdpt[1]\n\n # get top line of box which will be bottom of black key\n top = (approx[0,1]+approx[1,1])/2\n \n # get width of box, which will be width of a white key\n # black keys will be 2/3 as wide as a white key\n left_mdpt = [(approx[0,0]+approx[3,0])/2,(approx[0,1]+approx[3,1])/2]\n right_mdpt = [(approx[1,0]+approx[2,0])/2,(approx[1,1]+approx[2,1])/2]\n left_x = left_mdpt[0]\n right_x = right_mdpt[0]\n width = right_x - left_x\n\n # get corners of key\n ckey = [[left_x,board_top_y],[right_x,board_top_y],[right_x,board_bot_y],[left_x,board_bot_y]]\n\n return(ckey,width,top,[board_left_x,board_right_x])\n\ndef remainder_black_keys(remainder,higher):\n if higher:\n if remainder == 1:\n return 1\n elif remainder == 2:\n return 2\n elif remainder == 3:\n return 2\n elif remainder == 4:\n return 3\n elif remainder == 5:\n return 4\n elif remainder == 6:\n return 5\n else:\n return 0\n else:\n if remainder == 1:\n return 0\n elif remainder == 2:\n return 1\n elif remainder == 3:\n return 2\n elif remainder == 4:\n return 3\n elif remainder == 5:\n return 3\n elif remainder == 6:\n return 4\n else:\n return 0\n\ndef get_all_keys(frame,corners):\n\n # get the C key\n C_key_output = get_C_key(frame,corners)\n if C_key_output is None:\n return {}\n\n ckey = C_key_output[0]\n key_width = C_key_output[1]\n black_bot = C_key_output[2]\n board_bounds = C_key_output[3]\n\n # extrapolate positions of other keys\n num_higher_white_keys = np.around((board_bounds[1] - ckey[1][0])/key_width,decimals=0)\n higher_remainder = num_higher_white_keys % 7\n higher_remainder = remainder_black_keys(higher_remainder,True)\n num_higher_black_keys = (num_higher_white_keys//7)*5\n keys = [(ckey[0][0],ckey)]\n\n # white keys\n repeats = np.arange(num_higher_white_keys)\n higher_keys = [ (ckey[0][0]+shift*key_width,[[ckey[0][0]+shift*key_width,ckey[0][1]],[ckey[1][0]+shift*key_width,ckey[1][1]],[ckey[2][0]+shift*key_width,ckey[2][1]],[ckey[3][0]+shift*key_width,ckey[3][1]]]) for shift in repeats ]\n\n # black keys\n black_keys = []\n key = [[ckey[0][0]+2*key_width/3,ckey[0][1]],[ckey[1][0]+key_width/3,ckey[1][1]],[ckey[2][0]+2*key_width/3,black_bot],[ckey[3][0]+key_width/3,black_bot]]\n black_keys.append((key[0][0],key))\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n last_key = black_keys[-1][1]\n key = 
[[last_key[0][0]+2*key_width,last_key[0][1]],[last_key[1][0]+2*key_width,last_key[1][1]],[last_key[2][0]+2*key_width,last_key[2][1]],[last_key[3][0]+2*key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n for i in range(int(num_higher_black_keys/5-1)):\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+2*key_width,last_key[0][1]],[last_key[1][0]+2*key_width,last_key[1][1]],[last_key[2][0]+2*key_width,last_key[2][1]],[last_key[3][0]+2*key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+2*key_width,last_key[0][1]],[last_key[1][0]+2*key_width,last_key[1][1]],[last_key[2][0]+2*key_width,last_key[2][1]],[last_key[3][0]+2*key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n count = 0\n for i in range(1):\n if count >= higher_remainder:\n break\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+2*key_width,last_key[0][1]],[last_key[1][0]+2*key_width,last_key[1][1]],[last_key[2][0]+2*key_width,last_key[2][1]],[last_key[3][0]+2*key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n count = count + 1\n if count >= higher_remainder:\n break\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n count = count + 1\n if count >= higher_remainder:\n break\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+2*key_width,last_key[0][1]],[last_key[1][0]+2*key_width,last_key[1][1]],[last_key[2][0]+2*key_width,last_key[2][1]],[last_key[3][0]+2*key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n count = count + 1\n if count >= higher_remainder:\n break\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n black_keys.append((key[0][0],key))\n count = count + 1\n if count >= higher_remainder:\n break\n last_key = black_keys[-1][1]\n key = [[last_key[0][0]+key_width,last_key[0][1]],[last_key[1][0]+key_width,last_key[1][1]],[last_key[2][0]+key_width,last_key[2][1]],[last_key[3][0]+key_width,last_key[3][1]]]\n 
black_keys.append((key[0][0],key))\n\n # sort by left x coordinate\n for black_key in black_keys:\n higher_keys.append(black_key)\n higher_keys.sort()\n key_boxes = [x for y,x in higher_keys]\n notes = range(len(key_boxes))\n key_dict = dict(zip(notes,key_boxes))\n\n return key_dict\n\ndef get_board(frame):\n\n corners = get_corners(frame)\n if corners is None:\n return {}\n\n key_dict = get_all_keys(frame,corners)\n return key_dict\n" ]
[ [ "numpy.arange", "numpy.around", "numpy.argmax", "numpy.diff", "numpy.argmin", "numpy.array", "numpy.zeros" ] ]
sappelhoff/sp_psychopy
[ "79cae80eb920b35fb27a52acfde0eda38b9124b1" ]
[ "sp_experiment/tests/test_utils.py" ]
[ "\"\"\"Testing the utility functions.\"\"\"\nimport time\nimport os\nimport os.path as op\nfrom tempfile import gettempdir\nfrom shutil import rmtree, copyfile\nfrom collections import OrderedDict\n\nimport pytest\nimport numpy as np\nimport pandas as pd\n\nimport sp_experiment\nfrom sp_experiment.define_settings import (EXPECTED_FPS,\n KEYLIST_SAMPLES\n )\nfrom sp_experiment.utils import (Fake_serial,\n My_serial,\n calc_bonus_payoff,\n get_final_choice_outcomes,\n get_payoff_dict_from_df,\n get_passive_action,\n get_passive_outcome,\n get_jittered_waitframes,\n log_data,\n _get_payoff_setting,\n )\nfrom sp_experiment.define_payoff_settings import (get_payoff_settings,\n get_payoff_dict\n )\ninit_dir = op.dirname(sp_experiment.__file__)\ndata_dir = op.join(init_dir, 'experiment_data')\ntest_data_dir = op.join(init_dir, 'tests', 'data')\n\nno_errors_file = op.join(test_data_dir, '2_trials_no_errors.tsv')\n\n\ndef test_serials():\n \"\"\"Test the Fake_serial class.\"\"\"\n some_byte = bytes([1])\n ser = Fake_serial()\n assert ser.write(some_byte) == some_byte\n\n # Also covers \"mysleep\"\n waitsecs = 1\n ser = My_serial(ser, waitsecs)\n start = time.perf_counter()\n ser.write(some_byte)\n stop = time.perf_counter()\n assert (stop - start) >= waitsecs\n\n\ndef test_calc_bonus_payoff():\n \"\"\"Test bonus calculation.\"\"\"\n # Check for non-present data\n bonus = calc_bonus_payoff(998)\n assert isinstance(bonus, list)\n assert len(bonus) == 4\n assert bonus[0] == 'did not yet complete task \"A\".'\n\n bonus = calc_bonus_payoff(999)\n assert bonus[1] == 'did not yet complete task \"B\".'\n\n # present data ... temporarily copy over a test file\n tmp_fpath1 = op.join(data_dir, 'sub-998_task-spactive_events.tsv')\n tmp_fpath2 = op.join(data_dir, 'sub-998_task-sppassive_events.tsv')\n copyfile(no_errors_file, tmp_fpath1)\n copyfile(no_errors_file, tmp_fpath2)\n\n bonus = calc_bonus_payoff(998, exchange_rate=0.1)\n\n # remove tmp files\n os.remove(tmp_fpath1)\n os.remove(tmp_fpath2)\n assert bonus[-1] == '4 Euros'\n\n\ndef test_get_final_choice_outcomes():\n \"\"\"Test getting final choice outcomes.\"\"\"\n df = pd.read_csv(no_errors_file, sep='\\t')\n outcomes = get_final_choice_outcomes(df)\n expected_outcomes = [5, 9] # as can be read in the data file\n np.testing.assert_array_equal(outcomes, expected_outcomes)\n\n\ndef test_get_payoff_dict_from_df():\n \"\"\"Test getting payoff_dicts.\"\"\"\n df = pd.read_csv(no_errors_file, sep='\\t')\n\n # The trial argument is 0-indexed\n payoff_dict = get_payoff_dict_from_df(df, 0)\n assert isinstance(payoff_dict, OrderedDict)\n\n # Make a more thorough test with the second payoff distribution\n payoff_dict = get_payoff_dict_from_df(df, 1)\n read_set = set(payoff_dict[0])\n expected_set = set((3, 9))\n assert len(read_set) == len(expected_set)\n assert sorted(read_set) == sorted(expected_set)\n\n read_set = set(payoff_dict[1])\n expected_set = set((7, 8))\n assert len(read_set) == len(expected_set)\n assert sorted(read_set) == sorted(expected_set)\n\n # There were only 2 trials, this should be out of index\n with pytest.raises(IndexError):\n get_payoff_dict_from_df(df, 2)\n\n\ndef test_get_passive_action():\n \"\"\"Test getting an action for replay in passive condition.\"\"\"\n df = pd.read_csv(no_errors_file, sep='\\t')\n\n keys_rts = get_passive_action(df, 0, 0)\n\n # keys_rts should be a list of tuples\n assert isinstance(keys_rts, list)\n assert len(keys_rts) == 1\n assert isinstance(keys_rts[0], tuple)\n\n # did we read the correct numbers\n 
assert keys_rts[0][0] == KEYLIST_SAMPLES[0]\n np.testing.assert_allclose(keys_rts[0][1], 0.227, rtol=0.01)\n\n\ndef test_get_passive_outcome():\n \"\"\"Test getting an outcome for replay in passive condition.\"\"\"\n df = pd.read_csv(no_errors_file, sep='\\t')\n\n # If we pass the \"last sample\", we get the final choice outcome\n outcome = get_passive_outcome(df, 0, -1)\n outcomes = get_final_choice_outcomes(df)\n assert outcome == outcomes[0]\n\n # Other samples give us reasonable results\n expected_outcomes = [3, 3, 3, 5, 5, 5, 4, 5, 3, 3, 3, 3]\n for sample, expected in zip(range(12), expected_outcomes):\n out = get_passive_outcome(df, 0, sample)\n assert out == expected\n\n\ndef test_get_jittered_waitframes():\n \"\"\"Test the waitframes func.\"\"\"\n n = 100\n for _ in range(n):\n wait_frames = get_jittered_waitframes(1000, 2000)\n assert wait_frames >= EXPECTED_FPS and wait_frames <= EXPECTED_FPS*2\n\n\ndef test_log_data():\n \"\"\"Sanity check the data logging.\"\"\"\n df = pd.read_csv(no_errors_file, sep='\\t')\n\n # Check that action_types are as expected\n action_types = df['action_type'].dropna().unique().tolist()\n np.testing.assert_array_equal(action_types,\n ['sample', 'forced_stop', 'final_choice'])\n\n # Create a temporary logging file\n myhash = str(hash(os.times()))\n data_dir = op.join(gettempdir(), myhash)\n os.makedirs(data_dir)\n fname = 'tmp_data_file.tsv'\n fpath = op.join(data_dir, fname)\n\n # Log some data\n log_data(fpath)\n\n with open(fpath, 'r') as fin:\n for i, line in enumerate(fin.readlines()):\n # spot check some known data in the line\n assert line.strip().split('\\t')[-2] == '0'\n\n # There should have been only one line\n assert i == 0\n\n # Log more data\n log_data(fpath, action=5)\n log_data(fpath, action=2)\n log_data(fpath, action=3)\n log_data(fpath, action=7)\n\n df = pd.read_csv(fpath, sep='\\t', header=None)\n\n action_types = df[3].tolist()\n action_vals = df[4].tolist()\n assert len(action_types) == 5 and len(action_vals) == 5\n assert np.isnan(action_types[0]) and np.isnan(action_vals[0])\n assert action_types[1] == 'forced_stop' and action_vals[1] == 0\n assert action_types[2] == 'stop' and action_vals[2] == 2\n assert action_types[3] == 'final_choice' and action_vals[3] == 0\n assert action_types[4] == 'premature_stop' and action_vals[4] == 2\n\n # And even more data logging\n payoff_settings = get_payoff_settings(0.1)\n setting = payoff_settings[0, :]\n payoff_dict = get_payoff_dict(setting)\n log_data(fpath, payoff_dict=payoff_dict)\n\n # Remove the temporary dir and all its contents\n rmtree(data_dir, ignore_errors=True)\n\n\[email protected]('trial, expected_setting', (\n pytest.param(0, np.array((3, 98, 1, 0, 5, 4, 0.8, 0.2))), # noqa: E501\n pytest.param(1, np.array((3, 9, 0.22, 0.78, 8, 7, 0.67, 0.33))), # noqa: E501\n ))\ndef test_get_payoff_setting_aux(trial, expected_setting):\n \"\"\"Test private func for getting payoff sets from df.\"\"\"\n # Test experienced\n df = pd.read_csv(no_errors_file, sep='\\t')\n setting = _get_payoff_setting(df, trial, experienced=True)\n np.testing.assert_array_equal(setting.squeeze(), expected_setting)\n" ]
[ [ "pandas.read_csv", "numpy.isnan", "numpy.testing.assert_array_equal", "numpy.testing.assert_allclose", "numpy.array" ] ]
samedii/latent-diffusion
[ "f13bf9bf463d95b5a16aeadd2b02abde31f769f8" ]
[ "ldm/data/imagenet.py" ]
[ "import os, yaml, pickle, shutil, tarfile, glob\nimport cv2\nimport albumentations\nimport PIL\nimport numpy as np\nimport torchvision.transforms.functional as TF\nfrom omegaconf import OmegaConf\nfrom functools import partial\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset, Subset\n\nimport taming.data.utils as tdu\nfrom taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve\nfrom taming.data.imagenet import ImagePaths\n\nfrom ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light\n\n\ndef synset2idx(path_to_yaml=\"data/index_synset.yaml\"):\n with open(path_to_yaml) as f:\n di2s = yaml.load(f)\n return dict((v,k) for k,v in di2s.items())\n\n\nclass ImageNetBase(Dataset):\n def __init__(self, config=None):\n self.config = config or OmegaConf.create()\n if not type(self.config)==dict:\n self.config = OmegaConf.to_container(self.config)\n self.keep_orig_class_label = self.config.get(\"keep_orig_class_label\", False)\n self.process_images = True # if False we skip loading & processing images and self.data contains filepaths\n self._prepare()\n self._prepare_synset_to_human()\n self._prepare_idx_to_synset()\n self._prepare_human_to_integer_label()\n self._load()\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n return self.data[i]\n\n def _prepare(self):\n raise NotImplementedError()\n\n def _filter_relpaths(self, relpaths):\n ignore = set([\n \"n06596364_9591.JPEG\",\n ])\n relpaths = [rpath for rpath in relpaths if not rpath.split(\"/\")[-1] in ignore]\n if \"sub_indices\" in self.config:\n indices = str_to_indices(self.config[\"sub_indices\"])\n synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings\n self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)\n files = []\n for rpath in relpaths:\n syn = rpath.split(\"/\")[0]\n if syn in synsets:\n files.append(rpath)\n return files\n else:\n return relpaths\n\n def _prepare_synset_to_human(self):\n SIZE = 2655750\n URL = \"https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1\"\n self.human_dict = os.path.join(self.root, \"synset_human.txt\")\n if (not os.path.exists(self.human_dict) or\n not os.path.getsize(self.human_dict)==SIZE):\n download(URL, self.human_dict)\n\n def _prepare_idx_to_synset(self):\n URL = \"https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1\"\n self.idx2syn = os.path.join(self.root, \"index_synset.yaml\")\n if (not os.path.exists(self.idx2syn)):\n download(URL, self.idx2syn)\n\n def _prepare_human_to_integer_label(self):\n URL = \"https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1\"\n self.human2integer = os.path.join(self.root, \"imagenet1000_clsidx_to_labels.txt\")\n if (not os.path.exists(self.human2integer)):\n download(URL, self.human2integer)\n with open(self.human2integer, \"r\") as f:\n lines = f.read().splitlines()\n assert len(lines) == 1000\n self.human2integer_dict = dict()\n for line in lines:\n value, key = line.split(\":\")\n self.human2integer_dict[key] = int(value)\n\n def _load(self):\n with open(self.txt_filelist, \"r\") as f:\n self.relpaths = f.read().splitlines()\n l1 = len(self.relpaths)\n self.relpaths = self._filter_relpaths(self.relpaths)\n print(\"Removed {} files from filelist during filtering.\".format(l1 - len(self.relpaths)))\n\n self.synsets = [p.split(\"/\")[0] for p in self.relpaths]\n self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]\n\n unique_synsets = 
np.unique(self.synsets)\n class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))\n if not self.keep_orig_class_label:\n self.class_labels = [class_dict[s] for s in self.synsets]\n else:\n self.class_labels = [self.synset2idx[s] for s in self.synsets]\n\n with open(self.human_dict, \"r\") as f:\n human_dict = f.read().splitlines()\n human_dict = dict(line.split(maxsplit=1) for line in human_dict)\n\n self.human_labels = [human_dict[s] for s in self.synsets]\n\n labels = {\n \"relpath\": np.array(self.relpaths),\n \"synsets\": np.array(self.synsets),\n \"class_label\": np.array(self.class_labels),\n \"human_label\": np.array(self.human_labels),\n }\n\n if self.process_images:\n self.size = retrieve(self.config, \"size\", default=256)\n self.data = ImagePaths(self.abspaths,\n labels=labels,\n size=self.size,\n random_crop=self.random_crop,\n )\n else:\n self.data = self.abspaths\n\n\nclass ImageNetTrain(ImageNetBase):\n NAME = \"ILSVRC2012_train\"\n URL = \"http://www.image-net.org/challenges/LSVRC/2012/\"\n AT_HASH = \"a306397ccf9c2ead27155983c254227c0fd938e2\"\n FILES = [\n \"ILSVRC2012_img_train.tar\",\n ]\n SIZES = [\n 147897477120,\n ]\n\n def __init__(self, process_images=True, data_root=None, **kwargs):\n self.process_images = process_images\n self.data_root = data_root\n super().__init__(**kwargs)\n\n def _prepare(self):\n if self.data_root:\n self.root = os.path.join(self.data_root, self.NAME)\n else:\n cachedir = os.environ.get(\"XDG_CACHE_HOME\", os.path.expanduser(\"~/.cache\"))\n self.root = os.path.join(cachedir, \"autoencoders/data\", self.NAME)\n\n self.datadir = os.path.join(self.root, \"data\")\n self.txt_filelist = os.path.join(self.root, \"filelist.txt\")\n self.expected_length = 1281167\n self.random_crop = retrieve(self.config, \"ImageNetTrain/random_crop\",\n default=True)\n if not tdu.is_prepared(self.root):\n # prep\n print(\"Preparing dataset {} in {}\".format(self.NAME, self.root))\n\n datadir = self.datadir\n if not os.path.exists(datadir):\n path = os.path.join(self.root, self.FILES[0])\n if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:\n import academictorrents as at\n atpath = at.get(self.AT_HASH, datastore=self.root)\n assert atpath == path\n\n print(\"Extracting {} to {}\".format(path, datadir))\n os.makedirs(datadir, exist_ok=True)\n with tarfile.open(path, \"r:\") as tar:\n tar.extractall(path=datadir)\n\n print(\"Extracting sub-tars.\")\n subpaths = sorted(glob.glob(os.path.join(datadir, \"*.tar\")))\n for subpath in tqdm(subpaths):\n subdir = subpath[:-len(\".tar\")]\n os.makedirs(subdir, exist_ok=True)\n with tarfile.open(subpath, \"r:\") as tar:\n tar.extractall(path=subdir)\n\n filelist = glob.glob(os.path.join(datadir, \"**\", \"*.JPEG\"))\n filelist = [os.path.relpath(p, start=datadir) for p in filelist]\n filelist = sorted(filelist)\n filelist = \"\\n\".join(filelist)+\"\\n\"\n with open(self.txt_filelist, \"w\") as f:\n f.write(filelist)\n\n tdu.mark_prepared(self.root)\n\n\nclass ImageNetValidation(ImageNetBase):\n NAME = \"ILSVRC2012_validation\"\n URL = \"http://www.image-net.org/challenges/LSVRC/2012/\"\n AT_HASH = \"5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5\"\n VS_URL = \"https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1\"\n FILES = [\n \"ILSVRC2012_img_val.tar\",\n \"validation_synset.txt\",\n ]\n SIZES = [\n 6744924160,\n 1950000,\n ]\n\n def __init__(self, process_images=True, data_root=None, **kwargs):\n self.data_root = data_root\n self.process_images = process_images\n 
super().__init__(**kwargs)\n\n def _prepare(self):\n if self.data_root:\n self.root = os.path.join(self.data_root, self.NAME)\n else:\n cachedir = os.environ.get(\"XDG_CACHE_HOME\", os.path.expanduser(\"~/.cache\"))\n self.root = os.path.join(cachedir, \"autoencoders/data\", self.NAME)\n self.datadir = os.path.join(self.root, \"data\")\n self.txt_filelist = os.path.join(self.root, \"filelist.txt\")\n self.expected_length = 50000\n self.random_crop = retrieve(self.config, \"ImageNetValidation/random_crop\",\n default=False)\n if not tdu.is_prepared(self.root):\n # prep\n print(\"Preparing dataset {} in {}\".format(self.NAME, self.root))\n\n datadir = self.datadir\n if not os.path.exists(datadir):\n path = os.path.join(self.root, self.FILES[0])\n if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:\n import academictorrents as at\n atpath = at.get(self.AT_HASH, datastore=self.root)\n assert atpath == path\n\n print(\"Extracting {} to {}\".format(path, datadir))\n os.makedirs(datadir, exist_ok=True)\n with tarfile.open(path, \"r:\") as tar:\n tar.extractall(path=datadir)\n\n vspath = os.path.join(self.root, self.FILES[1])\n if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:\n download(self.VS_URL, vspath)\n\n with open(vspath, \"r\") as f:\n synset_dict = f.read().splitlines()\n synset_dict = dict(line.split() for line in synset_dict)\n\n print(\"Reorganizing into synset folders\")\n synsets = np.unique(list(synset_dict.values()))\n for s in synsets:\n os.makedirs(os.path.join(datadir, s), exist_ok=True)\n for k, v in synset_dict.items():\n src = os.path.join(datadir, k)\n dst = os.path.join(datadir, v)\n shutil.move(src, dst)\n\n filelist = glob.glob(os.path.join(datadir, \"**\", \"*.JPEG\"))\n filelist = [os.path.relpath(p, start=datadir) for p in filelist]\n filelist = sorted(filelist)\n filelist = \"\\n\".join(filelist)+\"\\n\"\n with open(self.txt_filelist, \"w\") as f:\n f.write(filelist)\n\n tdu.mark_prepared(self.root)\n\n\n\nclass ImageNetSR(Dataset):\n def __init__(self, size=None,\n degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.,\n random_crop=True):\n \"\"\"\n Imagenet Superresolution Dataloader\n Performs following ops in order:\n 1. crops a crop of size s from image either as random or center crop\n 2. resizes crop to size with cv2.area_interpolation\n 3. degrades resized crop with degradation_fn\n\n :param size: resizing to size after cropping\n :param degradation: degradation_fn, e.g. 
cv_bicubic or bsrgan_light\n :param downscale_f: Low Resolution Downsample factor\n :param min_crop_f: determines crop size s,\n where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f)\n :param max_crop_f: \"\"\n :param data_root:\n :param random_crop:\n \"\"\"\n self.base = self.get_base()\n assert size\n assert (size / downscale_f).is_integer()\n self.size = size\n self.LR_size = int(size / downscale_f)\n self.min_crop_f = min_crop_f\n self.max_crop_f = max_crop_f\n assert(max_crop_f <= 1.)\n self.center_crop = not random_crop\n\n self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)\n\n self.pil_interpolation = False # gets reset later if incase interp_op is from pillow\n\n if degradation == \"bsrgan\":\n self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)\n\n elif degradation == \"bsrgan_light\":\n self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)\n\n else:\n interpolation_fn = {\n \"cv_nearest\": cv2.INTER_NEAREST,\n \"cv_bilinear\": cv2.INTER_LINEAR,\n \"cv_bicubic\": cv2.INTER_CUBIC,\n \"cv_area\": cv2.INTER_AREA,\n \"cv_lanczos\": cv2.INTER_LANCZOS4,\n \"pil_nearest\": PIL.Image.NEAREST,\n \"pil_bilinear\": PIL.Image.BILINEAR,\n \"pil_bicubic\": PIL.Image.BICUBIC,\n \"pil_box\": PIL.Image.BOX,\n \"pil_hamming\": PIL.Image.HAMMING,\n \"pil_lanczos\": PIL.Image.LANCZOS,\n }[degradation]\n\n self.pil_interpolation = degradation.startswith(\"pil_\")\n\n if self.pil_interpolation:\n self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)\n\n else:\n self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,\n interpolation=interpolation_fn)\n\n def __len__(self):\n return len(self.base)\n\n def __getitem__(self, i):\n example = self.base[i]\n image = Image.open(example[\"file_path_\"])\n\n if not image.mode == \"RGB\":\n image = image.convert(\"RGB\")\n\n image = np.array(image).astype(np.uint8)\n\n min_side_len = min(image.shape[:2])\n crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)\n crop_side_len = int(crop_side_len)\n\n if self.center_crop:\n self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)\n\n else:\n self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)\n\n image = self.cropper(image=image)[\"image\"]\n image = self.image_rescaler(image=image)[\"image\"]\n\n if self.pil_interpolation:\n image_pil = PIL.Image.fromarray(image)\n LR_image = self.degradation_process(image_pil)\n LR_image = np.array(LR_image).astype(np.uint8)\n\n else:\n LR_image = self.degradation_process(image=image)[\"image\"]\n\n example[\"image\"] = (image/127.5 - 1.0).astype(np.float32)\n example[\"LR_image\"] = (LR_image/127.5 - 1.0).astype(np.float32)\n\n return example\n\n\nclass ImageNetSRTrain(ImageNetSR):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def get_base(self):\n with open(\"data/imagenet_train_hr_indices.p\", \"rb\") as f:\n indices = pickle.load(f)\n dset = ImageNetTrain(process_images=False,)\n return Subset(dset, indices)\n\n\nclass ImageNetSRValidation(ImageNetSR):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def get_base(self):\n with open(\"data/imagenet_val_hr_indices.p\", \"rb\") as f:\n indices = pickle.load(f)\n dset = ImageNetValidation(process_images=False,)\n return Subset(dset, indices)\n" ]
[ [ "torch.utils.data.Subset", "numpy.random.uniform", "numpy.array", "numpy.unique" ] ]
hodgestar/qiskit-ignis
[ "0e511df442e864cd0e06efcdd1db7b03c011168b" ]
[ "qiskit/ignis/verification/randomized_benchmarking/circuits.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# TODO(mtreinish): Remove these disables when implementation is finished\n# pylint: disable=unused-argument,unnecessary-pass\n\n\"\"\"\nGenerates randomized benchmarking sequences\n\"\"\"\n\nimport copy\nimport numpy as np\nimport qiskit\n\nfrom .Clifford import Clifford\nfrom .clifford_utils import CliffordUtils as clutils\nfrom .dihedral import CNOTDihedral\nfrom .dihedral_utils import DihedralUtils as dutils\n\n\ndef handle_length_multiplier(length_multiplier, len_pattern,\n is_purity=False):\n \"\"\"\n Check validity of length_multiplier.\n In addition, transform it into a vector if it is a constant.\n In case of purity rb the length multiplier should be None.\n\n Args:\n length_multiplier: length of the multiplier\n len_pattern: length of the RB pattern\n is_purity: True only for purity rb (default is False)\n\n Returns:\n length_multiplier\n \"\"\"\n\n if hasattr(length_multiplier, \"__len__\"):\n if is_purity:\n raise ValueError(\n \"In case of Purity RB the length multiplier should be None\")\n if len(length_multiplier) != len_pattern:\n raise ValueError(\n \"Length mulitiplier must be the same length as the pattern\")\n length_multiplier = np.array(length_multiplier)\n if length_multiplier.dtype != 'int' or (length_multiplier < 1).any():\n raise ValueError(\"Invalid length multiplier\")\n else:\n length_multiplier = np.ones(len_pattern, dtype='int')*length_multiplier\n\n return length_multiplier\n\n\ndef check_pattern(pattern, is_purity=False):\n \"\"\"\n Verifies that the input pattern is valid\n i.e., that each qubit appears at most once\n\n In case of purity rb, checks that all simultaneous sequences have the same\n dimension (e.g. only 1-qubit sequences, or only 2-qubit sequences etc.)\n\n Args:\n pattern: RB pattern\n n_qubits: number of qubits\n is_purity: True only for purity rb (default is False)\n\n Raises:\n ValueError: if the pattern is not valid\n\n Return:\n qlist: flat list of all the qubits in the pattern\n maxqubit: the maximum qubit number\n maxdim: the maximal dimension (maximal number of qubits\n in all sequences)\n \"\"\"\n\n pattern_flat = []\n pattern_dim = []\n for pat in pattern:\n pattern_flat.extend(pat)\n pattern_dim.append(len(pat))\n\n _, uni_counts = np.unique(np.array(pattern_flat), return_counts=True)\n if (uni_counts > 1).any():\n raise ValueError(\"Invalid pattern. Duplicate qubit index.\")\n\n dim_distinct = np.unique(pattern_dim)\n if is_purity:\n if len(dim_distinct) > 1:\n raise ValueError(\"Invalid pattern for purity RB. 
\\\n All simultaneous sequences should have the \\\n same dimension.\")\n\n return pattern_flat, np.max(pattern_flat).item(), np.max(pattern_dim)\n\n\ndef calc_xdata(length_vector, length_multiplier):\n \"\"\"\n Calculate the set of sequences lengths\n\n Args:\n length_vector: vector length\n length_multiplier: length of the multiplier of the vector length\n\n Returns:\n An array of sequences lengths\n \"\"\"\n\n xdata = []\n for mult in length_multiplier:\n xdata.append(np.array(length_vector)*mult)\n\n return np.array(xdata)\n\n\ndef randomized_benchmarking_seq(nseeds=1, length_vector=None,\n rb_pattern=None,\n length_multiplier=1, seed_offset=0,\n align_cliffs=False,\n interleaved_gates=None,\n is_purity=False,\n group_gates=None):\n \"\"\"Get a generic randomized benchmarking sequence\n\n Args:\n nseeds: number of seeds\n length_vector: 'm' length vector of sequence lengths. Must be in\n ascending order. RB sequences of increasing length grow on top of\n the previous sequences.\n rb_pattern: A list of the form [[i,j],[k],...] which will make\n simultaneous RB sequences where\n Qi,Qj are a 2Q RB sequence and Qk is a 1Q sequence, etc.\n E.g. [[0,3],[2],[1]] would create RB sequences that are\n 2Q for Q0/Q3, 1Q for Q1+Q2\n The number of qubits is the sum of the entries.\n For 'regular' RB the qubit_pattern is just [[0]],[[0,1]].\n length_multiplier: if this is an array it scales each rb_sequence by\n the multiplier\n seed_offset: What to start the seeds at (e.g. if we\n want to add more seeds later)\n align_cliffs: If true adds a barrier across all qubits in rb_pattern\n after each set of elements, not necessarily Cliffords\n (note: aligns after each increment of elements including the\n length multiplier so if the multiplier is [1,3] it will barrier\n after 1 element for the first pattern and 3 for the second).\n interleaved_gates: A list of gates of elements that\n will be interleaved (for interleaved randomized benchmarking)\n The length of the list would equal the length of the rb_pattern.\n is_purity: True only for purity rb (default is False)\n group_gates: On which group (or gate set) we perform RB\n (default is the Clifford group)\n '0' or None or 'Clifford': Clifford group\n '1' or 'CNOT-Dihedral' or 'Non-Clifford': CNOT-Dihedral group\n\n\n Returns:\n A tuple of different fields depending on inputs. 
The different fields\n are:\n\n * ``circuits``: list of lists of circuits for the rb sequences\n (separate list for each seed)\n * ``xdata``: the sequences lengths (with multiplier if applicable)\n * ``circuits_interleaved`` `(only if interleaved_gates is not None)`:\n list of lists of circuits for the interleaved rb sequences\n (separate list for each seed)\n * ``circuits_purity`` `(only if is_purity=True)`:\n list of lists of lists of circuits for purity rb\n (separate list for each seed and each of the 3^n circuits)\n * ``npurity`` `(only if is_purity=True)`:\n the number of purity rb circuits (per seed)\n which equals to 3^n, where n is the dimension\n\n \"\"\"\n # Set modules (default is Clifford)\n if group_gates is None or group_gates in ('0',\n 'Clifford',\n 'clifford'):\n Gutils = clutils()\n Ggroup = Clifford\n rb_circ_type = 'rb'\n group_gates_type = 0\n elif group_gates in ('1', 'Non-Clifford',\n 'NonClifford'\n 'CNOTDihedral',\n 'CNOT-Dihedral'):\n Gutils = dutils()\n Ggroup = CNOTDihedral\n rb_circ_type = 'rb_cnotdihedral'\n group_gates_type = 1\n else:\n raise ValueError(\"Unknown group or set of gates.\")\n\n if rb_pattern is None:\n rb_pattern = [[0]]\n if length_vector is None:\n length_vector = [1, 10, 20]\n\n qlist_flat, n_q_max, max_dim = check_pattern(rb_pattern, is_purity)\n length_multiplier = handle_length_multiplier(length_multiplier,\n len(rb_pattern),\n is_purity)\n # number of purity rb circuits per seed\n npurity = 3**max_dim\n\n xdata = calc_xdata(length_vector, length_multiplier)\n\n pattern_sizes = [len(pat) for pat in rb_pattern]\n max_nrb = np.max(pattern_sizes)\n\n # load group tables\n group_tables = [[] for _ in range(max_nrb)]\n for rb_num in range(max_nrb):\n group_tables[rb_num] = Gutils.load_tables(rb_num+1)\n\n # initialization: rb sequences\n circuits = [[] for e in range(nseeds)]\n # initialization: interleaved rb sequences\n circuits_interleaved = [[] for e in range(nseeds)]\n # initialization: non-clifford cnot-dihedral\n # rb sequences\n circuits_cnotdihedral = [[] for e in range(nseeds)]\n # initialization: non-clifford cnot-dihedral\n # interleaved rb sequences\n circuits_cnotdihedral_interleaved = [[] for e in range(nseeds)]\n # initialization: purity rb sequences\n circuits_purity = [[[] for d in range(npurity)]\n for e in range(nseeds)]\n\n # go through for each seed\n for seed in range(nseeds):\n qr = qiskit.QuantumRegister(n_q_max+1, 'qr')\n cr = qiskit.ClassicalRegister(len(qlist_flat), 'cr')\n general_circ = qiskit.QuantumCircuit(qr, cr)\n interleaved_circ = qiskit.QuantumCircuit(qr, cr)\n\n # make sequences for each of the separate sequences in\n # rb_pattern\n Elmnts = []\n for rb_q_num in pattern_sizes:\n Elmnts.append(Ggroup(rb_q_num))\n # Sequences for interleaved rb sequences\n Elmnts_interleaved = []\n for rb_q_num in pattern_sizes:\n Elmnts_interleaved.append(Ggroup(rb_q_num))\n\n # go through and add elements to RB sequences\n length_index = 0\n for elmnts_index in range(length_vector[-1]):\n for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):\n\n for _ in range(length_multiplier[rb_pattern_index]):\n new_elmnt_gatelist = Gutils.random_gates(\n rb_q_num)\n Elmnts[rb_pattern_index] = Gutils.compose_gates(\n Elmnts[rb_pattern_index], new_elmnt_gatelist)\n general_circ += replace_q_indices(\n get_quantum_circuit(Gutils.gatelist(),\n rb_q_num),\n rb_pattern[rb_pattern_index], qr)\n\n # add a barrier\n general_circ.barrier(\n *[qr[x] for x in rb_pattern[rb_pattern_index]])\n\n # interleaved rb sequences\n if 
interleaved_gates is not None:\n Elmnts_interleaved[rb_pattern_index] = \\\n Gutils.compose_gates(\n Elmnts_interleaved[rb_pattern_index],\n new_elmnt_gatelist)\n interleaved_circ += replace_q_indices(\n get_quantum_circuit(Gutils.gatelist(),\n rb_q_num),\n rb_pattern[rb_pattern_index], qr)\n Elmnts_interleaved[rb_pattern_index] = \\\n Gutils.compose_gates(\n Elmnts_interleaved[rb_pattern_index],\n interleaved_gates[rb_pattern_index])\n # add a barrier - interleaved rb\n interleaved_circ.barrier(\n *[qr[x] for x in rb_pattern[rb_pattern_index]])\n interleaved_circ += replace_q_indices(\n get_quantum_circuit(Gutils.gatelist(),\n rb_q_num),\n rb_pattern[rb_pattern_index], qr)\n # add a barrier - interleaved rb\n interleaved_circ.barrier(\n *[qr[x] for x in rb_pattern[rb_pattern_index]])\n\n if align_cliffs:\n # if align at a barrier across all patterns\n general_circ.barrier(\n *[qr[x] for x in qlist_flat])\n # align for interleaved rb\n if interleaved_gates is not None:\n interleaved_circ.barrier(\n *[qr[x] for x in qlist_flat])\n\n # if the number of elements matches one of the sequence lengths\n # then calculate the inverse and produce the circuit\n if (elmnts_index+1) == length_vector[length_index]:\n # circ for rb:\n circ = qiskit.QuantumCircuit(qr, cr)\n circ += general_circ\n # circ_interleaved for interleaved rb:\n circ_interleaved = qiskit.QuantumCircuit(qr, cr)\n circ_interleaved += interleaved_circ\n\n for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):\n inv_key = Gutils.find_key(Elmnts[rb_pattern_index],\n rb_q_num)\n inv_circuit = Gutils.find_inverse_gates(\n rb_q_num,\n group_tables[rb_q_num-1][inv_key])\n circ += replace_q_indices(\n get_quantum_circuit(inv_circuit, rb_q_num),\n rb_pattern[rb_pattern_index], qr)\n # calculate the inverse and produce the circuit\n # for interleaved rb\n if interleaved_gates is not None:\n inv_key = Gutils.find_key(Elmnts_interleaved\n [rb_pattern_index],\n rb_q_num)\n inv_circuit = Gutils.find_inverse_gates(\n rb_q_num,\n group_tables[rb_q_num - 1][inv_key])\n circ_interleaved += replace_q_indices(\n get_quantum_circuit(inv_circuit, rb_q_num),\n rb_pattern[rb_pattern_index], qr)\n\n # Circuits for purity rb\n if is_purity:\n circ_purity = [[] for d in range(npurity)]\n for d in range(npurity):\n circ_purity[d] = qiskit.QuantumCircuit(qr, cr)\n circ_purity[d] += circ\n circ_purity[d].name = rb_circ_type + '_purity_'\n ind_d = d\n purity_qubit_num = 0\n while True:\n # Per each qubit:\n # do nothing or rx(pi/2) or ry(pi/2)\n purity_qubit_rot = np.mod(ind_d, 3)\n ind_d = np.floor_divide(ind_d, 3)\n if purity_qubit_rot == 0: # do nothing\n circ_purity[d].name += 'Z'\n if purity_qubit_rot == 1: # add rx(pi/2)\n for pat in rb_pattern:\n circ_purity[d].rx(np.pi / 2,\n qr[pat[\n purity_qubit_num]])\n circ_purity[d].name += 'X'\n if purity_qubit_rot == 2: # add ry(pi/2)\n for pat in rb_pattern:\n circ_purity[d].ry(np.pi / 2,\n qr[pat[\n purity_qubit_num]])\n circ_purity[d].name += 'Y'\n purity_qubit_num = purity_qubit_num + 1\n if ind_d == 0:\n break\n # padding the circuit name with Z's so that\n # all circuits will have names of the same length\n for _ in range(max_dim - purity_qubit_num):\n circ_purity[d].name += 'Z'\n # add measurement for purity rb\n for qind, qb in enumerate(qlist_flat):\n circ_purity[d].measure(qr[qb], cr[qind])\n circ_purity[d].name += '_length_%d_seed_%d' \\\n % (length_index,\n seed + seed_offset)\n\n # add measurement for Non-Clifford cnot-dihedral rb\n # measure both the ground state |0...0> (circ)\n # and the 
|+...+> state (cnot-dihedral_circ)\n cnotdihedral_circ = qiskit.QuantumCircuit(qr, cr)\n cnotdihedral_interleaved_circ = qiskit.QuantumCircuit(qr, cr)\n if group_gates_type == 1:\n for _, qb in enumerate(qlist_flat):\n cnotdihedral_circ.h(qr[qb])\n cnotdihedral_circ.barrier(qr[qb])\n cnotdihedral_interleaved_circ.h(qr[qb])\n cnotdihedral_interleaved_circ.barrier(qr[qb])\n cnotdihedral_circ += circ\n cnotdihedral_interleaved_circ += circ_interleaved\n for _, qb in enumerate(qlist_flat):\n cnotdihedral_circ.barrier(qr[qb])\n cnotdihedral_circ.h(qr[qb])\n cnotdihedral_interleaved_circ.barrier(qr[qb])\n cnotdihedral_interleaved_circ.h(qr[qb])\n for qind, qb in enumerate(qlist_flat):\n cnotdihedral_circ.measure(qr[qb], cr[qind])\n cnotdihedral_interleaved_circ.measure(qr[qb], cr[qind])\n\n # add measurement for standard rb\n # qubits measure to the c registers as\n # they appear in the pattern\n for qind, qb in enumerate(qlist_flat):\n circ.measure(qr[qb], cr[qind])\n # add measurement for interleaved rb\n circ_interleaved.measure(qr[qb], cr[qind])\n\n circ.name = \\\n rb_circ_type + '_length_%d_seed_%d' % \\\n (length_index, seed + seed_offset)\n circ_interleaved.name = \\\n rb_circ_type + '_interleaved_length_%d_seed_%d' % \\\n (length_index, seed + seed_offset)\n\n if group_gates_type == 1:\n circ.name = rb_circ_type + '_Z_length_%d_seed_%d' % \\\n (length_index, seed + seed_offset)\n circ_interleaved.name = \\\n rb_circ_type + '_interleaved_Z_length_%d_seed_%d' % \\\n (length_index, seed + seed_offset)\n cnotdihedral_circ.name = \\\n rb_circ_type + '_X_length_%d_seed_%d' % \\\n (length_index, seed + seed_offset)\n cnotdihedral_interleaved_circ.name = \\\n rb_circ_type + 'interleaved_X_length_%d_seed_%d' % \\\n (length_index, seed + seed_offset)\n\n circuits[seed].append(circ)\n circuits_interleaved[seed].append(circ_interleaved)\n circuits_cnotdihedral[seed].append(cnotdihedral_circ)\n circuits_cnotdihedral_interleaved[seed].append(\n cnotdihedral_interleaved_circ)\n\n if is_purity:\n for d in range(npurity):\n circuits_purity[seed][d].append(circ_purity[d])\n length_index += 1\n\n # output of purity rb\n if is_purity:\n return circuits_purity, xdata, npurity\n # output of non-clifford cnot-dihedral interleaved rb\n if interleaved_gates is not None and group_gates_type == 1:\n return circuits, xdata, circuits_cnotdihedral, circuits_interleaved, \\\n circuits_cnotdihedral_interleaved\n # output of interleaved rb\n if interleaved_gates is not None:\n return circuits, xdata, circuits_interleaved\n # output of Non-Clifford cnot-dihedral rb\n if group_gates_type == 1:\n return circuits, xdata, circuits_cnotdihedral\n # output of standard (simultaneous) rb\n return circuits, xdata\n\n\ndef replace_q_indices(circuit, q_nums, qr):\n \"\"\"\n Take a circuit that is ordered from 0,1,2 qubits and replace 0 with the\n qubit label in the first index of q_nums, 1 with the second index...\n\n Args:\n circuit: circuit to operate on\n q_nums: list of qubit indices\n\n Returns:\n updated circuit\n \"\"\"\n\n new_circuit = qiskit.QuantumCircuit(qr)\n for instr, qargs, cargs in circuit.data:\n new_qargs = [\n qr[q_nums[x]] for x in [arg.index for arg in qargs]]\n new_op = copy.deepcopy((instr, new_qargs, cargs))\n new_circuit.data.append(new_op)\n\n return new_circuit\n\n\ndef get_quantum_circuit(gatelist, num_qubits):\n \"\"\"\n Returns the circuit in the form of a QuantumCircuit object.\n\n Args:\n num_qubits: the number of qubits (dimension).\n gatelist: a list of gates.\n\n Returns:\n A QuantumCircuit 
object.\n \"\"\"\n qr = qiskit.QuantumRegister(num_qubits)\n qc = qiskit.QuantumCircuit(qr)\n\n for op in gatelist:\n split = op.split()\n op_names = [split[0]]\n\n # temporary correcting the ops name since QuantumCircuit has no\n # attributes 'v' or 'w' yet:\n if op_names == ['v']:\n op_names = ['sdg', 'h']\n elif op_names == ['w']:\n op_names = ['h', 's']\n\n if op_names == ['u1']:\n qubits = [qr[int(x)] for x in split[2:]]\n theta = float(split[1])\n else:\n qubits = [qr[int(x)] for x in split[1:]]\n\n for sub_op in op_names:\n operation = eval('qiskit.QuantumCircuit.' + sub_op)\n if sub_op == 'u1':\n operation(qc, theta, *qubits)\n else:\n operation(qc, *qubits)\n\n return qc\n" ]
[ [ "numpy.unique", "numpy.floor_divide", "numpy.ones", "numpy.max", "numpy.mod", "numpy.array" ] ]
dbckz/crossing-the-line
[ "c5debb20e263e03eab9188ce7229753034939964" ]
[ "scripts/process_perspective.py" ]
[ "\"\"\"\nScript to evaluate tweets against the Perspective API\n\nHow it's used:\n* Loads \"tweets.csv\" files according to 'root_path' and 'day_paths' vars\n* Sends one tweet at a time to the API\n* Sleeps for 1 second between requests due to API rate-limit\n* Appends results to perspective_processed_tweets.csv after every 50 tweets, so that not all progress is lost if the\n script were to die midway through processing a file\n\"\"\"\nimport os\nimport time\n\nimport numpy as np\nimport pandas as pd\nfrom googleapiclient import discovery\n\n\ndef get_perspective_client(api_key):\n return discovery.build(\n \"commentanalyzer\",\n \"v1alpha1\",\n developerKey=api_key,\n discoveryServiceUrl=\"https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1\",\n static_discovery=False,\n )\n\n\ndef query_perspective(client, text, tweet_id, logfile):\n analyze_request = {\n 'comment': {\n 'text': text\n },\n 'requestedAttributes': {\n 'TOXICITY': {},\n 'SEVERE_TOXICITY': {},\n 'IDENTITY_ATTACK': {},\n 'INSULT': {},\n 'THREAT': {},\n 'SEXUALLY_EXPLICIT': {}\n }\n }\n try:\n response = client.comments().analyze(body=analyze_request).execute()\n toxicity_score = response['attributeScores']['TOXICITY']['summaryScore']['value']\n severe_toxicity_score = response['attributeScores']['SEVERE_TOXICITY']['summaryScore']['value']\n identity_attack_score = response['attributeScores']['IDENTITY_ATTACK']['summaryScore']['value']\n insult_score = response['attributeScores']['INSULT']['summaryScore']['value']\n threat_score = response['attributeScores']['THREAT']['summaryScore']['value']\n sexually_explicit_score = response['attributeScores']['SEXUALLY_EXPLICIT']['summaryScore']['value']\n return {\n \"toxicity_score\": toxicity_score,\n \"severe_toxicity_score\": severe_toxicity_score,\n \"identity_attack_score\": identity_attack_score,\n \"insult_score\": insult_score,\n \"threat_score\": threat_score,\n \"sexually_explicit_score\": sexually_explicit_score,\n \"error\": \"\"\n }\n except Exception as e:\n with open(logfile, 'a') as f:\n f.write(f\"{time.ctime()}: EXCEPTION. Tweet Id: {tweet_id}: {e}\")\n f.write('\\n')\n print(f\"EXCEPTION. 
Tweet Id: {tweet_id}: {e}\")\n if ('reason' in e.error_details[0] and e.error_details[0]['reason'] == 'RATE_LIMIT_EXCEEDED'):\n with open(logfile, 'a') as f:\n sleeptime = 70\n f.write(f\"{time.ctime()}: Sleeping for {sleeptime} seconds\")\n f.write('\\n')\n print(f\"Sleeping for {sleeptime} seconds\")\n time.sleep(70)\n return query_perspective(client, text, tweet_id, logfile)\n return {\n \"toxicity_score\": -1,\n \"severe_toxicity_score\": -1,\n \"identity_attack_score\": -1,\n \"insult_score\": -1,\n \"threat_score\": -1,\n \"sexually_explicit_score\": -1,\n \"error\": \"ERROR\"\n }\n\n\ndef process_tweet(tweet, perspective_client, output_dataframe, logfile):\n data = query_perspective(perspective_client, tweet['tweet_text'], tweet['tweet_id'], logfile)\n output_dataframe.loc[tweet['tweet_id']] = [\n tweet['tweet_id'],\n data['toxicity_score'],\n data['severe_toxicity_score'],\n data['identity_attack_score'],\n data['insult_score'],\n data['threat_score'],\n data['sexually_explicit_score'],\n data['error']\n ]\n\n\ndef process_day(directory):\n logfile = directory + \"/perspective_error_log.txt\"\n progress_logfile = directory + \"/perspective_progress_log.txt\"\n\n with open(progress_logfile, 'a') as f:\n f.write(f\"{time.ctime()}: Starting processing for {directory}\")\n f.write('\\n')\n print(f\"Starting processing for {directory}\")\n\n # Load tweet CSV file\n in_csv = directory + \"/tweets.csv\"\n out_csv = directory + \"/perspective_processed_tweets.csv\"\n\n # Delete existing output file if it exists\n if os.path.exists(out_csv):\n os.remove(out_csv)\n\n number_lines = sum(1 for row in (open(in_csv)))\n chunk_size = 50\n\n tweets_remaining = number_lines - 1\n\n with open(progress_logfile, 'a') as f:\n f.write(f\"{time.ctime()}: Number of tweets: {tweets_remaining}\")\n f.write('\\n')\n print(f\"Number of tweets: {tweets_remaining}\")\n\n for i in range(0, number_lines, chunk_size):\n start = time.time()\n in_tweets = pd.read_csv(in_csv,\n header=0,\n nrows=chunk_size, # number of rows to read at each loop\n skiprows=range(1, i)) # skip rows that have been read\n if (i == 0):\n print(f\"Loaded first {len(in_tweets.index)} tweets.\")\n\n out_tweets = pd.DataFrame(\n columns=[\"tweet_id\", \"toxicity_score\", \"severe_toxicity_score\", \"identity_attack_score\", \"insult_score\",\n \"threat_score\", \"sexually_explicit_score\", \"error\"])\n\n # Do processing for tweet\n for _, row in in_tweets.iterrows():\n process_tweet(row, perspective_client, out_tweets, logfile)\n time.sleep(1) # Sleep due to 1 req/second limit on Perspective API\n\n # Ensure tweet_id written as int\n new_dtypes = {\n \"tweet_id\": int,\n \"toxicity_score\": np.float64,\n \"severe_toxicity_score\": np.float64,\n \"identity_attack_score\": np.float64,\n \"insult_score\": np.float64,\n \"threat_score\": np.float64,\n \"sexually_explicit_score\": np.float64,\n \"error\": str\n }\n out_tweets = out_tweets.astype(new_dtypes)\n\n if (i == 0):\n out_tweets.to_csv(out_csv,\n index=False,\n header=True,\n mode='a', # append data to csv file\n chunksize=chunk_size) # size of data to append for each loop\n else:\n out_tweets.to_csv(out_csv,\n index=False,\n header=False,\n mode='a', # append data to csv file\n chunksize=chunk_size) # size of data to append for each loop\n\n tweets_remaining = tweets_remaining - len(out_tweets.index)\n msg = f\"Processed {len(out_tweets.index)} tweets in {time.time() - start} seconds. 
{tweets_remaining} tweets remaining.\"\n with open(progress_logfile, 'a') as f:\n f.write(f\"{time.ctime()}: {msg}\")\n f.write('\\n')\n print(msg)\n\n with open(progress_logfile, 'a') as f:\n f.write(f\"{time.ctime()}: Completed processing for {directory}\")\n f.write('\\n')\n print(f\"Completed processing for {directory}\")\n\n\nif __name__ == \"__main__\":\n root_path = \"/Users/davebuckley/Documents/Kings/Dissertation/dissertation/data_collection\"\n\n day_paths = [\n \"/01\",\n \"/02\",\n \"/03\",\n \"/04\",\n \"/05\",\n \"/06\",\n \"/07\",\n \"/08\",\n \"/09\",\n \"/10\",\n \"/11\",\n \"/12\",\n \"/13\",\n \"/14\",\n \"/15\",\n \"/16\",\n \"/17\",\n \"/18\",\n \"/19\",\n \"/20\",\n \"/21\",\n \"/22\",\n \"/23\",\n \"/24\",\n \"/25\",\n \"/26\",\n \"/27\",\n \"/28\",\n \"/29\",\n \"/30\",\n \"/31\",\n \"/32\",\n \"/33\",\n \"/34\",\n \"/35\",\n \"/36\"\n ]\n # Auth to Perspective API\n print(\"Connecting to Perspective API\")\n API_KEY = os.getenv(\"PERSPECTIVE_API_KEY\")\n perspective_client = get_perspective_client(API_KEY)\n print(\"Connected to Perspective API\")\n\n for day in day_paths:\n process_day(root_path + day)\n print(\"All completed\")\n" ]
[ [ "pandas.DataFrame" ] ]
leaiannotti/jesse
[ "564c54845774891ff3b5a8d3c02cc7cea890ac54" ]
[ "jesse/indicators/pfe.py" ]
[ "from typing import Union\n\nimport numpy as np\nimport talib\n\nfrom jesse.helpers import get_candle_source, slice_candles, same_length\n\n\ndef pfe(candles: np.ndarray, period: int = 10, smoothing: int = 5, source_type: str = \"close\", sequential: bool = False) -> Union[\n float, np.ndarray]:\n \"\"\"\n Polarized Fractal Efficiency (PFE)\n\n :param candles: np.ndarray\n :param period: int - default: 10\n :param smoothing: int - default: 5\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n\n ln = period - 1\n diff = np.diff(source, ln)\n a = np.sqrt(np.power(diff, 2) + np.power(period, 2))\n b = talib.SUM(np.sqrt(1 + np.power(np.diff(source, 1), 2)), ln)\n pfetmp = 100 * same_length(source, a) / same_length(source, b)\n res = talib.EMA(np.where(same_length(source, diff) > 0, pfetmp, -pfetmp), smoothing)\n\n return res if sequential else res[-1]\n" ]
[ [ "numpy.diff", "numpy.power" ] ]
AsmaBRZ/rcrs-server
[ "d67a84a17b73dd95c5553bed68b8c4c08cd5651a" ]
[ "modules/sample/src/sample/CSV/pf.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport os\ntime=np.arange(1,301)\narray=np.zeros(250)\na=[]\n\nfichiers=os.listdir(\"d\")\n\nfor f in fichiers:\n print(f)\n i=0\n with open(\"d/\"+f, \"r\") as ins:\n for line in ins:\n if i<300:\n print(line)\n l=line.split(\" \")\n print(int(l[1]))\n print(i)\n print('jjjjjjjj')\n print(array[i])\n array[i]=array[i]+int(l[1])\n i=i+1\n\nprint (array)\n\n\nplt.plot(array)\nplt.ylabel(\"Nombre d'obstacles nettoyés\")\nplt.xlabel('Temps')\n\nplt.suptitle('Agent random')\nplt.show()\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.suptitle", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
claydodo/tinkt
[ "dfd07fe7cad34c0d5a1ec0e03a6437a502410918" ]
[ "tinkt/cmap_utils.py" ]
[ "# -*- coding:utf-8 -*-\n\n# cmap utils\n\nimport six\n\nimport numpy as np\nfrom matplotlib import cm as mpl_cm\nfrom matplotlib import colors as mpl_colors\nfrom . import cm as tinkt_cm\n\n\nCM_FAMILIES = {\n 'mpl': mpl_cm,\n 'tinkt': tinkt_cm\n}\n\n\ndef set_under_over_bad_colors(cmap, under=None, over=None, bad=None):\n if under is not None:\n cmap.set_under(under)\n if over is not None:\n cmap.set_over(over)\n if bad is not None:\n cmap.set_bad(bad)\n return cmap\n\n\ndef get_cmap(base_cmap,\n clip_min=None, clip_max=None,\n N=None,\n sample_points=None,\n bad=None, over=None, under=None,\n *args, **kwargs):\n \"\"\"\n Get cmap object by name, and optionally tweak it into a new one.\n Currently only supports tweaking of continuous cmaps.\n :param base_cmap: either a name or a cmap object.\n :param clip_min: lower clip point, valid range: 0.0~1.0, default: None.\n :param clip_max: upper clip point, valid range: 0.0~1.0, default: None.\n :param N: new cmap's color number, default: None (inherits from base_cmap).\n :param sample_points: a series of sampling points (0.0~1.0) on the base_cmap. When using this arg, clip_min, clip_max and N are ignored.\n :param bad: bad color, default None (inherits from base_cmap)\n :param over: over color, default None (inherits from base_cmap)\n :param under: under color, default None (inherits from base_cmap)\n :return: a cmap object (matplotlib.colors.Colormap)\n \"\"\"\n\n if isinstance(base_cmap, tuple):\n # The tuple-form is for compatibility of old codes using metlib.color.cmap_utils.get_cmap , which read opts from json file.\n # Please neglect the complex logics and use named args whenever possible.\n return _parse_tuple_form_args_for_get_cmap(base_cmap)\n\n if isinstance(base_cmap, six.string_types):\n for cm_family in CM_FAMILIES.values():\n try:\n base_cmap = getattr(cm_family, base_cmap)\n break\n except AttributeError:\n pass\n\n if not isinstance(base_cmap, mpl_colors.Colormap):\n raise RuntimeError(u'Cannot find base_cmap: {}'.format(base_cmap))\n\n if sample_points is not None:\n new_name = u'Resampled from {}'.format(base_cmap.name)\n new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))\n elif clip_min is not None or clip_max is not None:\n clip_min = 0.0 if clip_min is None else float(clip_min)\n clip_max = 0.0 if clip_max is None else float(clip_max)\n N = base_cmap.N if N is None else int(N)\n sample_points = np.linspace(clip_min, clip_max, N)\n new_name = u'Clipped from {}'.format(base_cmap.name)\n new_cmap = mpl_colors.LinearSegmentedColormap.from_list(new_name, base_cmap(sample_points))\n else:\n N = int(N) if N is not None else base_cmap.N\n new_cmap = base_cmap._resample(N)\n\n if bad is not None:\n new_cmap.set_bad(bad)\n elif base_cmap._rgba_bad:\n new_cmap.set_bad(base_cmap._rgba_bad)\n\n if over is not None:\n new_cmap.set_over(over)\n elif base_cmap._rgba_over:\n new_cmap.set_over(base_cmap._rgba_over)\n\n if under is not None:\n new_cmap.set_under(under)\n elif base_cmap._rgba_under:\n new_cmap.set_under(base_cmap._rgba_under)\n\n return new_cmap\n\n\ndef _parse_tuple_form_args_for_get_cmap(opts):\n # The tuple-form is for compatibility of old codes using metlib.color.cmap_utils.get_cmap, which read opts from json file.\n if len(opts) == 1:\n return get_cmap(opts[0])\n elif len(opts) == 2:\n if isinstance(opts[1], (tuple, list, np.ndarray)):\n if len(opts[1]) == 0:\n return get_cmap(opts[0])\n elif len(opts[1]) == 1:\n if isinstance(opts[1][0], (tuple, list, np.ndarray)):\n return 
get_cmap(opts[0], sample_points=opts[1][0])\n                else:\n                    raise ValueError(\"opts[1] with a single element must wrap a sequence of sample points\")\n            elif len(opts[1]) == 2:\n                clip_min, clip_max = opts[1]\n                N = None\n            elif len(opts[1]) == 3:\n                clip_min, clip_max, N = opts[1]\n            else:\n                return get_cmap(opts[0], sample_points=opts[1])\n            return get_cmap(opts[0], clip_min=clip_min, clip_max=clip_max, N=N)\n        else:\n            raise ValueError(\"opts[1] must be a tuple, list or numpy array\")\n    else:\n        raise ValueError(\"tuple-form opts must have 1 or 2 elements\")\n" ]
[ [ "numpy.linspace" ] ]
idf/FaceReader
[ "d649bf7ca7f9cf66ac99e81a5187cfcc2b54f49d" ]
[ "facerec_py/facerec/svm.py" ]
[ "from facerec_py.facerec.classifier import SVM\nfrom facerec_py.facerec.validation import KFoldCrossValidation\nfrom facerec_py.facerec.model import PredictableModel\nfrom svmutil import *\nfrom itertools import product\nimport numpy as np\nimport logging\n\n\ndef range_f(begin, end, step):\n seq = []\n while True:\n if step == 0: break\n if step > 0 and begin > end: break\n if step < 0 and begin < end: break\n seq.append(begin)\n begin = begin + step\n return seq\n\n\ndef grid(grid_parameters):\n grid = []\n for parameter in grid_parameters:\n begin, end, step = parameter\n grid.append(range_f(begin, end, step))\n return product(*grid)\n\n\ndef grid_search(model, X, y, C_range=(-5, 15, 2), gamma_range=(3, -15, -2), k=5, num_cores=1):\n \n if not isinstance(model, PredictableModel):\n raise TypeError(\"GridSearch expects a PredictableModel. If you want to perform optimization on raw data use facerec.feature.Identity to pass unpreprocessed data!\")\n if not isinstance(model.classifier, SVM):\n raise TypeError(\"GridSearch expects a SVM as classifier. Please use a facerec.classifier.SVM!\")\n \n logger = logging.getLogger(\"facerec.svm.gridsearch\")\n logger.info(\"Performing a Grid Search.\")\n \n # best parameter combination to return\n best_parameter = svm_parameter(\"-q\")\n best_parameter.kernel_type = model.classifier.param.kernel_type\n best_parameter.nu = model.classifier.param.nu\n best_parameter.coef0 = model.classifier.param.coef0\n # either no gamma given or kernel is linear (only C to optimize)\n if (gamma_range is None) or (model.classifier.param.kernel_type == LINEAR):\n gamma_range = (0, 0, 1)\n \n # best validation error so far\n best_accuracy = np.finfo('float').min\n \n # create grid (cartesian product of ranges) \n g = grid([C_range, gamma_range])\n results = []\n for p in g:\n C, gamma = p\n C, gamma = 2**C, 2**gamma\n model.classifier.param.C, model.classifier.param.gamma = C, gamma\n\n # perform a k-fold cross validation\n cv = KFoldCrossValidation(model=model,k=k)\n cv.validate(X,y)\n\n # append parameter into list with accuracies for all parameter combinations\n results.append([C, gamma, cv.accuracy])\n \n # store best parameter combination\n if cv.accuracy > best_accuracy:\n logger.info(\"best_accuracy=%s\" % (cv.accuracy))\n best_accuracy = cv.accuracy\n best_parameter.C, best_parameter.gamma = C, gamma\n \n logger.info(\"%d-CV Result = %.2f.\" % (k, cv.accuracy))\n \n # set best parameter combination to best found\n return best_parameter, results\n" ]
[ [ "numpy.finfo" ] ]
andrewyguo/privacy
[ "a33afde0c105ece6c48b17a80f13899cf3e7c1b3", "a33afde0c105ece6c48b17a80f13899cf3e7c1b3" ]
[ "tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/keras_evaluation_test.py", "tensorflow_privacy/privacy/estimators/binary_class_head_test.py" ]
[ "# Copyright 2020, The TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl.testing import absltest\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import keras_evaluation\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackResults\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType\nfrom tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics\n\n\nclass UtilsTest(absltest.TestCase):\n\n def __init__(self, methodname):\n \"\"\"Initialize the test class.\"\"\"\n super().__init__(methodname)\n\n self.ntrain, self.ntest = 50, 100\n self.nclass = 5\n self.ndim = 10\n\n # Generate random training and test data\n self.train_data = np.random.rand(self.ntrain, self.ndim)\n self.test_data = np.random.rand(self.ntest, self.ndim)\n self.train_labels = np.random.randint(self.nclass, size=self.ntrain)\n self.test_labels = np.random.randint(self.nclass, size=self.ntest)\n\n self.model = tf.keras.Sequential([tf.keras.layers.Dense(self.nclass)])\n\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n self.model.compile(optimizer='Adam', loss=loss, metrics=['accuracy'])\n\n def test_calculate_losses(self):\n \"\"\"Test calculating the loss.\"\"\"\n pred, loss = keras_evaluation.calculate_losses(self.model, self.train_data,\n self.train_labels)\n self.assertEqual(pred.shape, (self.ntrain, self.nclass))\n self.assertEqual(loss.shape, (self.ntrain,))\n\n pred, loss = keras_evaluation.calculate_losses(self.model, self.test_data,\n self.test_labels)\n self.assertEqual(pred.shape, (self.ntest, self.nclass))\n self.assertEqual(loss.shape, (self.ntest,))\n\n def test_run_attack_on_keras_model(self):\n \"\"\"Test the attack.\"\"\"\n results = keras_evaluation.run_attack_on_keras_model(\n self.model, (self.train_data, self.train_labels),\n (self.test_data, self.test_labels),\n attack_types=[AttackType.THRESHOLD_ATTACK])\n self.assertIsInstance(results, AttackResults)\n att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(\n results)\n self.assertLen(att_types, 2)\n self.assertLen(att_slices, 2)\n self.assertLen(att_metrics, 2)\n self.assertLen(att_values, 2)\n\n\nif __name__ == '__main__':\n absltest.main()\n", "# Copyright 2020, The TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_privacy.privacy.estimators import binary_class_head\nfrom tensorflow_privacy.privacy.estimators import test_utils\nfrom tensorflow_privacy.privacy.optimizers.dp_optimizer_keras import DPKerasSGDOptimizer\n\n\nclass DPBinaryClassHeadTest(tf.test.TestCase):\n \"\"\"Tests for DP-enabled binary class heads.\"\"\"\n\n def testLoss(self):\n \"\"\"Tests loss() returns per-example losses.\"\"\"\n\n head = binary_class_head.DPBinaryClassHead()\n features = {'feature_a': np.full((4), 1.0)}\n labels = np.array([[1.0], [1.0], [1.0], [0.0]])\n logits = np.full((4, 1), 0.5)\n\n actual_loss = head.loss(labels, logits, features)\n expected_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=labels, logits=logits)\n\n self.assertEqual(actual_loss.shape, [4, 1])\n\n if tf.executing_eagerly():\n self.assertEqual(actual_loss.shape, [4, 1])\n self.assertAllClose(actual_loss, expected_loss)\n return\n\n self.assertAllClose(expected_loss, self.evaluate(actual_loss))\n\n def testCreateTPUEstimatorSpec(self):\n \"\"\"Tests that an Estimator built with this head works.\"\"\"\n\n train_features, train_labels = test_utils.make_input_data(256, 2)\n feature_columns = []\n for key in train_features:\n feature_columns.append(tf.feature_column.numeric_column(key=key))\n\n head = binary_class_head.DPBinaryClassHead()\n optimizer = DPKerasSGDOptimizer(\n learning_rate=0.5,\n l2_norm_clip=1.0,\n noise_multiplier=0.0,\n num_microbatches=2)\n model_fn = test_utils.make_model_fn(head, optimizer, feature_columns)\n classifier = tf.estimator.Estimator(model_fn=model_fn)\n\n classifier.train(\n input_fn=test_utils.make_input_fn(train_features, train_labels, True),\n steps=4)\n\n test_features, test_labels = test_utils.make_input_data(64, 2)\n classifier.evaluate(\n input_fn=test_utils.make_input_fn(test_features, test_labels, False),\n steps=4)\n\n predict_features, predict_labels_ = test_utils.make_input_data(64, 2)\n classifier.predict(\n input_fn=test_utils.make_input_fn(predict_features, predict_labels_,\n False))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "numpy.random.rand", "numpy.random.randint" ], [ "tensorflow.executing_eagerly", "tensorflow.estimator.Estimator", "tensorflow.test.main", "numpy.full", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.feature_column.numeric_column", "numpy.array" ] ]
Jack407/TFCNs_source_code
[ "f41466ad18457dd6335287112191e5daacf6d80d" ]
[ "train_utils.py" ]
[ "import argparse\nimport logging\nimport random\nimport sys\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\nfrom torch.nn.modules.loss import CrossEntropyLoss\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom utils import one_hot_encoder\nfrom loss import mixed_focal_loss\nfrom loss import dice_loss as dl\nfrom torchvision import transforms\nimport os\n\ndef train_starter(args, model, snapshot_path):\n\n from preprocess import TFCNs_dataset, RandomGenerator\n logging.basicConfig(filename=snapshot_path + \"/log.txt\", level=logging.INFO,\n format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n logging.info(str(args))\n base_lr = args.base_lr\n num_classes = args.num_classes\n batch_size = args.batch_size * args.n_gpu\n db_train = TFCNs_dataset(base_dir=args.root_path, list_dir=args.list_dir, split=\"train\",\n transform=transforms.Compose(\n [RandomGenerator(output_size=[args.img_size, args.img_size])]))\n print(\"The length of train set is: {}\".format(len(db_train)))\n def worker_init_fn(worker_id):\n random.seed(args.seed + worker_id)\n\n trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True,\n worker_init_fn=worker_init_fn)\n if args.n_gpu > 1:\n model = nn.DataParallel(model)\n model.train()\n optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)\n writer = SummaryWriter(snapshot_path + '/log')\n iter_num = 0\n max_epoch = args.max_epochs\n max_iterations = args.max_epochs * len(trainloader) # max_epoch = max_iterations // len(trainloader) + 1\n logging.info(\"{} iterations per epoch. {} max iterations \".format(len(trainloader), max_iterations))\n best_performance = 0.0\n iterator = tqdm(range(max_epoch), ncols=70)\n for epoch_num in iterator:\n for i_batch, sampled_batch in enumerate(trainloader):\n image_batch, label_batch = sampled_batch['image'], sampled_batch['label']\n image_batch, label_batch = image_batch.cuda(), label_batch.cuda()\n outputs = model(image_batch)\n label_batch = one_hot_encoder(label_batch,args.dataset,args.num_classes)\n outputs = torch.softmax(outputs,dim=1)\n loss = mixed_focal_loss(label_batch,outputs)\n loss = torch.mean(loss,axis=0)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_\n\n iter_num = iter_num + 1\n writer.add_scalar('info/lr', lr_, iter_num)\n writer.add_scalar('info/total_loss', loss, iter_num)\n\n logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))\n\n save_interval = 50 # int(max_epoch/6)\n if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0:\n save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')\n torch.save(model.state_dict(), save_mode_path)\n logging.info(\"save model to {}\".format(save_mode_path))\n\n if epoch_num >= max_epoch - 1:\n save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth')\n torch.save(model.state_dict(), save_mode_path)\n logging.info(\"save model to {}\".format(save_mode_path))\n iterator.close()\n break\n\n writer.close()\n return \"Training Finished!\"\n" ]
[ [ "torch.softmax", "torch.mean", "torch.nn.DataParallel", "torch.utils.data.DataLoader" ] ]
G-Simeone/Learning_Accident_Occurence_on_Dutch_Highways
[ "1f3992a529fed70fd488811d68128a1e255fac5f" ]
[ "src/create_experiment.py" ]
[ "import sys\nfrom utils import write_exp_utils\nimport pandas as pd\nfrom utils import misc_utils\nimport psycopg2\nfrom psycopg2.extras import Json, DictCursor\n\ndef main(argv):\n print(argv[1])\n w = write_exp_utils.ExperimentConfig(argv[1], argv[2])\n print(\"writing {} to database\".format(argv[1]) )\n w.write_to_db()# write experiment on database \n\n # check if the experiment is written correctly\n q = 'select experiment_id from rws_experiment.experiment_table order by experiment_id desc limit 1;'\t\n conn = misc_utils.connect_rds()\n print(pd.read_sql(q, conn)) \n\nif __name__== '__main__':\n main(sys.argv)\n" ]
[ [ "pandas.read_sql" ] ]
melodist/MELTNET
[ "47548e4a027ea4e23cdcb5ba1f1d9aa1aa7bbf29" ]
[ "Analysis/SampleVisualization_AE.py" ]
[ "\"\"\"\n Sample Visualization\n Make 2-D image of sample distribution\n 1-1. Extract Features using initial network\n 1-2. Extract Features using trained network\n 2. Using K-means to classify the patches\n 3. Dimension reduction using PCA\n 4. Visualize results\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom Network import NetworkKeras\nimport os\nimport time\nfrom Extraction import PatchExtraction\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\n\ndef SampleVisualization_AE(path_model, path_image):\n \"\"\" Visualize sample distribution using PCA.\n The result image will be saved on 'Results_%Y%m%d_%H%M%S'\n\n Input\n ______\n path_model: path of trained model\n path_image: path of test image\n\n Output\n ______\n \"\"\"\n\n tf.enable_eager_execution()\n\n time_start = time.time()\n\n # Extract Features using trained network\n # Load model\n input_shape = (17 * 17)\n\n initial_model_CT = NetworkKeras.create_autoencoder(input_shape)\n initial_model_PT = NetworkKeras.create_autoencoder(input_shape)\n\n trained_model_CT = NetworkKeras.create_autoencoder(input_shape)\n trained_model_CT.load_weights(path_model + 'CT')\n\n trained_model_PT = NetworkKeras.create_autoencoder(input_shape)\n trained_model_PT.load_weights(path_model + 'PT')\n\n # Make feature extraction model\n initial_extractor_CT = tf.keras.models.Model(inputs=initial_model_CT.input,\n outputs=initial_model_CT.get_layer('tf_op_layer_l2_normalize').output)\n initial_extractor_PT = tf.keras.models.Model(inputs=initial_model_PT.input,\n outputs=initial_model_PT.get_layer('tf_op_layer_l2_normalize_2').output)\n\n feature_extractor_CT = tf.keras.models.Model(inputs=trained_model_CT.input,\n outputs=trained_model_CT.get_layer('tf_op_layer_l2_normalize_4').output)\n feature_extractor_PT = tf.keras.models.Model(inputs=trained_model_PT.input,\n outputs=trained_model_PT.get_layer('tf_op_layer_l2_normalize_6').output)\n\n # Load Images\n ind_CT = [[230, 380], [150, 370]]\n ind_PT = [[230, 380], [150, 370]]\n\n # Make Results Folder\n now = datetime.now()\n path_result = f\"./Results_{now.strftime('%Y%m%d_%H%M%S')}/\"\n os.makedirs(path_result)\n\n # Print Patients Number\n patient_dir = os.listdir(path_image)\n print(f'Patients Number: {len(patient_dir)}')\n\n for path_patient in patient_dir:\n addr_patient = f'{path_image}/{path_patient}/'\\\n\n img_CT, img_PT = PatchExtraction.stackImages(addr_patient, ind_CT, ind_PT)\n patches_CT, patches_PT = PatchExtraction.patch_extraction_thres(img_CT, img_PT, 0)\n\n # Extract Features using initial network\n print(f\"Extract Features using initial network...\")\n features_init_CT = initial_extractor_CT.predict(patches_CT, steps=1)\n features_init_PT = initial_extractor_PT.predict(patches_PT, steps=1)\n features_init = np.hstack((features_init_CT, features_init_PT))\n\n # Extract Features\n print(f\"Extract Features...\")\n features_CT = feature_extractor_CT.predict(patches_CT, steps=1)\n features_PT = feature_extractor_PT.predict(patches_PT, steps=1)\n features = np.hstack((features_CT, features_PT))\n\n # Using K-means\n print(f\"K-means Clustering...\")\n num_labels = 5\n model_k_means = KMeans(n_clusters=num_labels, random_state=0)\n model_k_means.fit(features)\n\n # Merging Patches\n num_x = 44\n num_y = 30\n stride = 5\n\n label_predict = model_k_means.fit_predict(features)\n label_predict_batch = label_predict.reshape((-1, num_y * num_x))\n\n # Dimension reduction using PCA\n pca = 
PCA(n_components=2)\n features_low = pca.fit_transform(features)\n features_init_low = pca.transform(features_init)\n\n colors = ['salmon', 'orange', 'steelblue', 'violet', 'khaki']\n fig, ax = plt.subplots(2, figsize=(5, 5), constrained_layout=True)\n\n for i in range(5):\n data_init = features_init_low[label_predict == i]\n X_init = data_init[:, 0]\n Y_init = data_init[:, 1]\n ax[0].scatter(X_init, Y_init, color=colors[i], label=i, s=1)\n\n data = features_low[label_predict == i]\n X = data[:, 0]\n Y = data[:, 1]\n ax[1].scatter(X, Y, color=colors[i], label=i, s=1)\n\n ax[0].legend(loc='best')\n ax[0].set_xticks([])\n ax[0].set_yticks([])\n ax[1].legend(loc='best')\n ax[1].set_xticks([])\n ax[1].set_yticks([])\n\n fig.suptitle('Distribution of patches')\n plt.savefig(f\"{path_result}Plot_{path_patient}.png\", format='png', dpi=300)\n\n time_end = time.time()\n print(f\"Evaluation Finished! Elapsed time: {time_end - time_start}\")\n" ]
[ [ "numpy.hstack", "tensorflow.enable_eager_execution", "sklearn.cluster.KMeans", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "sklearn.decomposition.PCA" ] ]
francois-rozet/spiq
[ "a2e68c38da9129c85867e77641ed29d88e84c9d7" ]
[ "piqa/fsim.py" ]
[ "r\"\"\"Feature Similarity (FSIM)\n\nThis module implements the FSIM in PyTorch.\n\nOriginal:\n https://www4.comp.polyu.edu.hk/~cslzhang/IQA/FSIM/FSIM.htm\n\nReferences:\n .. [Zhang2011] FSIM: A Feature Similarity Index for Image Quality Assessment (Zhang et al., 2011)\n\n .. [Kovesi1999] Image Features From Phase Congruency (Kovesi, 1999)\n\"\"\"\n\nimport math\nimport torch\nimport torch.fft as fft\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch import Tensor\n\nfrom .utils import _jit, assert_type, reduce_tensor\nfrom .utils import complex as cx\nfrom .utils.color import ColorConv\nfrom .utils.functional import (\n scharr_kernel,\n gradient_kernel,\n filter_grid,\n log_gabor,\n channel_conv,\n l2_norm,\n)\n\n\n@_jit\ndef fsim(\n x: Tensor,\n y: Tensor,\n pc_x: Tensor,\n pc_y: Tensor,\n kernel: Tensor,\n value_range: float = 1.,\n t1: float = 0.85,\n t2: float = 160. / (255. ** 2),\n t3: float = 200. / (255. ** 2),\n t4: float = 200. / (255. ** 2),\n lmbda: float = 0.03,\n) -> Tensor:\n r\"\"\"Returns the FSIM between :math:`x` and :math:`y`,\n without color space conversion and downsampling.\n\n Args:\n x: An input tensor, :math:`(N, 3 \\text{ or } 1, H, W)`.\n y: A target tensor, :math:`(N, 3 \\text{ or } 1, H, W)`.\n pc_x: The input phase congruency, :math:`(N, H, W)`.\n pc_y: The target phase congruency, :math:`(N, H, W)`.\n kernel: A gradient kernel, :math:`(2, 1, K, K)`.\n value_range: The value range :math:`L` of the inputs (usually `1.` or `255`).\n\n Note:\n For the remaining arguments, refer to [Zhang2011]_.\n\n Returns:\n The FSIM vector, :math:`(N,)`.\n\n Example:\n >>> x = torch.rand(5, 3, 256, 256)\n >>> y = torch.rand(5, 3, 256, 256)\n >>> filters = pc_filters(x)\n >>> pc_x = phase_congruency(x[:, :1], filters)\n >>> pc_y = phase_congruency(y[:, :1], filters)\n >>> kernel = gradient_kernel(scharr_kernel())\n >>> l = fsim(x, y, pc_x, pc_y, kernel)\n >>> l.size()\n torch.Size([5])\n \"\"\"\n\n t2 *= value_range ** 2\n t3 *= value_range ** 2\n t4 *= value_range ** 2\n\n y_x, y_y = x[:, :1], y[:, :1]\n\n # Phase congruency similarity\n pc_m = torch.max(pc_x, pc_y)\n s_pc = (2 * pc_x * pc_y + t1) / (pc_x ** 2 + pc_y ** 2 + t1)\n\n # Gradient magnitude similarity\n pad = kernel.size(-1) // 2\n\n g_x = l2_norm(channel_conv(y_x, kernel, padding=pad), dims=[1])\n g_y = l2_norm(channel_conv(y_y, kernel, padding=pad), dims=[1])\n\n s_g = (2 * g_x * g_y + t2) / (g_x ** 2 + g_y ** 2 + t2)\n\n # Chrominance similarity\n s_l = s_pc * s_g\n\n if x.size(1) == 3:\n i_x, i_y = x[:, 1], y[:, 1]\n q_x, q_y = x[:, 2], y[:, 2]\n\n s_i = (2 * i_x * i_y + t3) / (i_x ** 2 + i_y ** 2 + t3)\n s_q = (2 * q_x * q_y + t4) / (q_x ** 2 + q_y ** 2 + t4)\n\n s_iq = s_i * s_q\n s_iq = cx.complx(s_iq, torch.zeros_like(s_iq))\n s_iq_lambda = cx.real(cx.pow(s_iq, lmbda))\n\n s_l = s_l * s_iq_lambda\n\n # Feature similarity\n fs = (s_l * pc_m).sum(dim=(-1, -2)) / pc_m.sum(dim=(-1, -2))\n\n return fs\n\n\n@_jit\ndef pc_filters(\n x: Tensor,\n scales: int = 4,\n orientations: int = 4,\n wavelength: float = 6.,\n factor: float = 2.,\n sigma_f: float = 0.5978, # -log(0.55)\n sigma_theta: float = 0.6545, # pi / (4 * 1.2)\n) -> Tensor:\n r\"\"\"Returns the log-Gabor filters for :func:`phase_congruency`.\n\n Args:\n x: An input tensor, :math:`(*, H, W)`.\n scales: The number of scales, :math:`S_1`.\n orientations: The number of orientations, :math:`S_2`.\n\n Note:\n For the remaining arguments, refer to [Kovesi1999]_.\n\n Returns:\n The filters tensor, :math:`(S_1, S_2, H, W)`.\n \"\"\"\n\n r, 
theta = filter_grid(x)\n\n # Low-pass filter\n lowpass = 1 / (1 + (r / 0.45) ** (2 * 15))\n\n # Radial\n radial = []\n\n for i in range(scales):\n f_0 = 1 / (wavelength * factor ** i)\n lg = log_gabor(r, f_0, sigma_f)\n radial.append(lg)\n\n radial = torch.stack(radial)\n\n # Angular\n cos_theta = torch.cos(theta)\n sin_theta = torch.sin(theta)\n\n theta_j = math.pi * torch.arange(orientations).to(x) / orientations\n theta_j = theta_j.reshape(orientations, 1, 1)\n\n ## Measure (theta - theta_j) in the sine/cosine domains\n ## to prevent wrap-around errors\n delta_sin = sin_theta * theta_j.cos() - cos_theta * theta_j.sin()\n delta_cos = cos_theta * theta_j.cos() + sin_theta * theta_j.sin()\n delta_theta = torch.atan2(delta_sin, delta_cos)\n\n angular = torch.exp(-delta_theta ** 2 / (2 * sigma_theta ** 2))\n\n # Combination\n filters = lowpass * radial[:, None] * angular[None, :]\n\n return filters\n\n\n@_jit\ndef phase_congruency(\n x: Tensor,\n filters: Tensor,\n value_range: float = 1.,\n k: float = 2.,\n rescale: float = 1.7,\n eps: float = 1e-8,\n) -> Tensor:\n r\"\"\"Returns the Phase Congruency (PC) of :math:`x`.\n\n Args:\n x: An input tensor, :math:`(N, 1, H, W)`.\n filters: The frequency domain filters, :math:`(S_1, S_2, H, W)`.\n value_range: The value range :math:`L` of the input (usually `1.` or `255`).\n\n Note:\n For the remaining arguments, refer to [Kovesi1999]_.\n\n Returns:\n The PC tensor, :math:`(N, H, W)`.\n\n Example:\n >>> x = torch.rand(5, 1, 256, 256)\n >>> filters = pc_filters(x)\n >>> pc = phase_congruency(x, filters)\n >>> pc.size()\n torch.Size([5, 256, 256])\n \"\"\"\n\n x = x * (255. / value_range)\n\n # Filters\n M_hat = filters\n M = fft.ifft2(M_hat)\n M = cx.real(torch.view_as_real(M))\n\n # Even & odd (real and imaginary) responses\n eo = fft.ifft2(fft.fft2(x[:, None]) * M_hat)\n eo = torch.view_as_real(eo)\n\n # Amplitude\n A = cx.mod(eo)\n\n # Expected E^2\n A2 = A[:, 0] ** 2\n median_A2, _ = A2.flatten(-2).median(dim=-1)\n expect_A2 = median_A2 / math.log(2)\n\n expect_M2_hat = (M_hat[0] ** 2).mean(dim=(-1, -2))\n expect_MiMj = (M[:, None] * M[None, :]).sum(dim=(0, 1, 3, 4))\n\n expect_E2 = expect_A2 * expect_MiMj / expect_M2_hat\n\n # Threshold\n sigma_G = expect_E2.sqrt()\n mu_R = sigma_G * (math.pi / 2) ** 0.5\n sigma_R = sigma_G * (2 - math.pi / 2) ** 0.5\n\n T = mu_R + k * sigma_R\n T = T / rescale # emprirical rescaling\n T = T[..., None, None]\n\n # Phase deviation\n FH = eo.sum(dim=1, keepdim=True)\n phi_eo = FH / (cx.mod(FH)[..., None] + eps)\n\n E = cx.dot(eo, phi_eo) - cx.dot(eo, cx.turn(phi_eo)).abs()\n E = E.sum(dim=1)\n\n # Phase congruency\n pc = (E - T).relu().sum(dim=1) / (A.sum(dim=(1, 2)) + eps)\n\n return pc\n\n\nclass FSIM(nn.Module):\n r\"\"\"Creates a criterion that measures the FSIM\n between an input and a target.\n\n Before applying :func:`fsim`, the input and target are converted from\n RBG to Y(IQ) and downsampled by a factor :math:`\\frac{\\min(H, W)}{256}`.\n\n Args:\n chromatic: Whether to use the chromatic channels (IQ) or not.\n downsample: Whether downsampling is enabled or not.\n kernel: A gradient kernel, :math:`(2, 1, K, K)`.\n If `None`, use the Scharr kernel instead.\n reduction: Specifies the reduction to apply to the output:\n `'none'` | `'mean'` | `'sum'`.\n\n Note:\n `**kwargs` are passed to :func:`fsim`.\n\n Shapes:\n input: :math:`(N, 3, H, W)`\n target: :math:`(N, 3, H, W)`\n output: :math:`(N,)` or :math:`()` depending on `reduction`\n\n Example:\n >>> criterion = FSIM().cuda()\n >>> x = torch.rand(5, 3, 
256, 256, requires_grad=True).cuda()\n >>> y = torch.rand(5, 3, 256, 256).cuda()\n >>> l = 1 - criterion(x, y)\n >>> l.size()\n torch.Size([])\n >>> l.backward()\n \"\"\"\n\n def __init__(\n self,\n chromatic: bool = True,\n downsample: bool = True,\n kernel: Tensor = None,\n reduction: str = 'mean',\n **kwargs,\n ):\n super().__init__()\n\n if kernel is None:\n kernel = gradient_kernel(scharr_kernel())\n\n self.register_buffer('kernel', kernel)\n self.register_buffer('filters', torch.zeros((0, 0, 0, 0)))\n\n self.convert = ColorConv('RGB', 'YIQ' if chromatic else 'Y')\n self.downsample = downsample\n self.reduction = reduction\n self.value_range = kwargs.get('value_range', 1.)\n self.kwargs = kwargs\n\n def forward(self, input: Tensor, target: Tensor) -> Tensor:\n assert_type(\n input, target,\n device=self.kernel.device,\n dim_range=(4, 4),\n n_channels=3,\n value_range=(0., self.value_range),\n )\n\n # Downsample\n if self.downsample:\n _, _, h, w = input.size()\n M = round(min(h, w) / 256)\n\n if M > 1:\n input = F.avg_pool2d(input, kernel_size=M, ceil_mode=True)\n target = F.avg_pool2d(target, kernel_size=M, ceil_mode=True)\n\n # RGB to Y(IQ)\n input = self.convert(input)\n target = self.convert(target)\n\n # Phase congruency\n if self.filters.shape[-2:] != input.shape[-2:]:\n self.filters = pc_filters(input)\n\n pc_input = phase_congruency(input[:, :1], self.filters, self.value_range)\n pc_target = phase_congruency(target[:, :1], self.filters, self.value_range)\n\n # FSIM\n l = fsim(input, target, pc_input, pc_target, kernel=self.kernel, **self.kwargs)\n\n return reduce_tensor(l, self.reduction)\n" ]
[ [ "torch.cos", "torch.view_as_real", "torch.max", "torch.sin", "torch.fft.fft2", "torch.zeros", "torch.nn.functional.avg_pool2d", "torch.zeros_like", "torch.fft.ifft2", "torch.exp", "torch.arange", "torch.stack", "torch.atan2" ] ]
ludysama/crp
[ "08027b67f174426ddac5eef8186349e8337481fc" ]
[ "solo/methods/nnsiam.py" ]
[ "# Copyright 2021 solo-learn development team.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\n# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport argparse\nfrom typing import Any, Dict, List, Sequence, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom solo.losses.simsiam import simsiam_loss_func\nfrom solo.methods.base import BaseMethod\nfrom solo.utils.misc import gather\n\n\nclass NNSiam(BaseMethod):\n def __init__(\n self,\n proj_output_dim: int,\n proj_hidden_dim: int,\n pred_hidden_dim: int,\n queue_size: int,\n **kwargs,\n ):\n \"\"\"Implements NNSiam (https://arxiv.org/abs/2104.14548).\n\n Args:\n proj_output_dim (int): number of dimensions of projected features.\n proj_hidden_dim (int): number of neurons of the hidden layers of the projector.\n pred_hidden_dim (int): number of neurons of the hidden layers of the predictor.\n queue_size (int): number of samples to keep in the queue.\n \"\"\"\n\n super().__init__(**kwargs)\n\n self.queue_size = queue_size\n\n # projector\n self.projector = nn.Sequential(\n nn.Linear(self.features_dim, proj_hidden_dim, bias=False),\n nn.BatchNorm1d(proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, proj_hidden_dim, bias=False),\n nn.BatchNorm1d(proj_hidden_dim),\n nn.ReLU(),\n nn.Linear(proj_hidden_dim, proj_output_dim),\n nn.BatchNorm1d(proj_output_dim, affine=False),\n )\n self.projector[6].bias.requires_grad = False # hack: not use bias as it is followed by BN\n\n # predictor\n self.predictor = nn.Sequential(\n nn.Linear(proj_output_dim, pred_hidden_dim, bias=False),\n nn.BatchNorm1d(pred_hidden_dim),\n nn.ReLU(),\n nn.Linear(pred_hidden_dim, proj_output_dim),\n )\n\n # queue\n self.register_buffer(\"queue\", torch.randn(self.queue_size, proj_output_dim))\n self.register_buffer(\"queue_y\", -torch.ones(self.queue_size, dtype=torch.long))\n self.queue = F.normalize(self.queue, dim=1)\n self.register_buffer(\"queue_ptr\", torch.zeros(1, dtype=torch.long))\n\n @staticmethod\n def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parent_parser = super(NNSiam, NNSiam).add_model_specific_args(parent_parser)\n parser = parent_parser.add_argument_group(\"nnsiam\")\n\n # projector\n parser.add_argument(\"--proj_output_dim\", type=int, default=128)\n parser.add_argument(\"--proj_hidden_dim\", type=int, default=2048)\n\n # predictor\n parser.add_argument(\"--pred_hidden_dim\", type=int, default=512)\n\n # queue settings\n parser.add_argument(\"--queue_size\", default=65536, type=int)\n\n return 
parent_parser\n\n @property\n def learnable_params(self) -> List[dict]:\n \"\"\"Adds projector and predictor parameters to the parent's learnable parameters.\n\n Returns:\n List[dict]: list of learnable parameters.\n \"\"\"\n\n extra_learnable_params: List[dict] = [\n {\"params\": self.projector.parameters()},\n {\"params\": self.predictor.parameters(), \"static_lr\": True},\n ]\n return super().learnable_params + extra_learnable_params\n\n @torch.no_grad()\n def dequeue_and_enqueue(self, z: torch.Tensor, y: torch.Tensor):\n \"\"\"Adds new samples and removes old samples from the queue in a fifo manner. Also stores\n the labels of the samples.\n\n Args:\n z (torch.Tensor): batch of projected features.\n y (torch.Tensor): labels of the samples in the batch.\n \"\"\"\n\n z = gather(z)\n y = gather(y)\n\n batch_size = z.shape[0]\n\n ptr = int(self.queue_ptr) # type: ignore\n assert self.queue_size % batch_size == 0\n\n self.queue[ptr : ptr + batch_size, :] = z\n self.queue_y[ptr : ptr + batch_size] = y # type: ignore\n ptr = (ptr + batch_size) % self.queue_size\n\n self.queue_ptr[0] = ptr # type: ignore\n\n @torch.no_grad()\n def find_nn(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Finds the nearest neighbor of a sample.\n\n Args:\n z (torch.Tensor): a batch of projected features.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]:\n indices and projected features of the nearest neighbors.\n \"\"\"\n\n idx = (z @ self.queue.T).max(dim=1)[1]\n nn = self.queue[idx]\n return idx, nn\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:\n \"\"\"Performs the forward pass of the encoder, the projector and the predictor.\n\n Args:\n X (torch.Tensor): a batch of images in the tensor format.\n\n Returns:\n Dict[str, Any]:\n a dict containing the outputs of the parent\n and the projected and predicted features.\n \"\"\"\n\n out = super().forward(X, *args, **kwargs)\n z = self.projector(out[\"feats\"])\n p = self.predictor(z)\n return {**out, \"z\": z, \"p\": p}\n\n def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:\n \"\"\"Training step for NNSiam reusing BaseMethod training step.\n\n Args:\n batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where\n [X] is a list of size self.num_crops containing batches of images\n batch_idx (int): index of the batch\n\n Returns:\n torch.Tensor: total loss composed of SimSiam loss and classification loss\n \"\"\"\n\n targets = batch[-1]\n\n out = super().training_step(batch, batch_idx)\n class_loss = out[\"loss\"]\n feats1, feats2 = out[\"feats\"]\n\n z1 = self.projector(feats1)\n z2 = self.projector(feats2)\n\n p1 = self.predictor(z1)\n p2 = self.predictor(z2)\n\n z1 = F.normalize(z1, dim=-1)\n z2 = F.normalize(z2, dim=-1)\n\n # find nn\n idx1, nn1 = self.find_nn(z1)\n _, nn2 = self.find_nn(z2)\n\n # ------- negative cosine similarity loss -------\n neg_cos_sim = simsiam_loss_func(p1, nn2) / 2 + simsiam_loss_func(p2, nn1) / 2\n\n # compute nn accuracy\n b = targets.size(0)\n nn_acc = (targets == self.queue_y[idx1]).sum() / b\n\n # dequeue and enqueue\n self.dequeue_and_enqueue(z1, targets)\n\n # calculate std of features\n z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()\n z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()\n z_std = (z1_std + z2_std) / 2\n\n metrics = {\n \"train_neg_cos_sim\": neg_cos_sim,\n \"train_z_std\": z_std,\n \"train_nn_acc\": nn_acc,\n }\n self.log_dict(metrics, on_epoch=True, sync_dist=True)\n\n return neg_cos_sim + class_loss\n" ]
[ [ "torch.nn.functional.normalize", "torch.nn.BatchNorm1d", "torch.ones", "torch.zeros", "torch.randn", "torch.nn.Linear", "torch.no_grad", "torch.nn.ReLU" ] ]
Pabsm94/Easyplume
[ "ee54194c1c0930b2a0ef442c47f80bd4570913d2" ]
[ "src/HYPERPLUME/hyperplume.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 22 14:07:39 2016\n\n@author: pablo\n\"\"\"\n\nimport numpy as np \n\nimport abc\n\nimport matplotlib.pyplot as plt \n\nclass Hyperplume():\n \n \"\"\" Parent class Hyperplume loads target plasma and defines common attributes as well as\n shared methods in the AEM and SSM plume classes\"\"\"\n \n __metaclass__= abc.ABCMeta # Python decorator used to define abstract methods at any location in the class\n \n @abc.abstractclassmethod # Defining abstract method\n \n def solver(self):\n \n \"\"\"Solver Abstract Method to be particularised by each Plume code. It is only defined for\n structure purposes in parent class Hyperplume\"\"\"\n \n return \n \n @abc.abstractclassmethod\n \n def query(self,z,r):\n \n \"\"\"Query abstract method returns plasma profile data at specified grid points. query method is\n to be particularised by each plume code.It is only defined forstructure purposes \n in parent class Hyperplume\"\"\"\n \n return \n \n def __init__(self,plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}},z_span=np.linspace(0,100,500),r_span=np.linspace(0,40,500),n_init=0.0472*np.linspace(1,0,500)**2):\n \n \"\"\" plume_constructor loads common class properties for AEM and SSM plume classes\n \n Args:\n plasma (dict): simple_plasma object dictionary containing basic plasma parameters.\n z_span (numpy.ndarray): axial region where the problem will be integrated.\n r_span (numpy.ndarray): initial far-field plasma radial profile.\n n_init (numpy.ndarray): initial dimensional density front.\n \n Usage:\n >>> Plasma = {'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}}\n >>> z_span = np.linspace(0,100,100)\n >>> r0 = np.linspace(0,3,100)\n >>> n0 = np.exp(-6.15/2*r_span**2)\n >>> Plume = Hyperplume(Plasma,z_span,r0,n0)\n \"\"\"\n \n self.plasma = plasma\n self.Gamma = plasma['Electrons']['Gamma']\n self.T_0 = plasma['Electrons']['T_0_electron']\n self.m_ion = plasma['Ions']['mass_ion']\n self.q_ion = plasma['Ions']['q_ion']\n self.z_span = z_span\n self.eta = r_span\n self.n0 = n_init\n \n \n def simple_plasma(self,charge=1.6e-19,ion_mass=2.1801714e-25,init_plasma_temp=2.1801714e-19,Gamma=1):\n \n \"\"\" Method simple_plasma allows the user to quickly create a Plasma dictionary with two particle species (ions and electrons), \n and well defined attributes.\n \n Args:\n charge (float): Electron charge given dimensional in units [C]\n ion_mass(float): Ion mass given in dimensional units [Kg]\n init_plasma_temp(float): Initial plasma temperature given in dimensional units [J]\n Gamma(int or float): Dimensionless thermal expansion constant. 
Must be inside isothermal and polytropic boundaries [1,5/3]\n \n Returns:\n plasma (dict): Dictionary containing two simple plasma species (ions and electrons) with the before mentioned\n properties stored in favorable form \n Usage: \n >>> Plasma = Hyperplume().simple_plasma(charge=1.6e-19,ion_mass=2.1801714e-25,init_plasma_temp=2.1801714e-19,Gamma=1)\n \n \"\"\"\n \n if Gamma < 1 or Gamma > 2: #checking thermal expansion model\n \n print ('Gamma is outside isothermal or polytropic boundaries')\n \n else:\n \n plasma={'Ions':{'mass_ion': ion_mass,'q_ion':charge}, 'Electrons':{'q_electron': -charge,'T_0_electron':init_plasma_temp,'Gamma':Gamma} }\n \n return plasma \n \n def temp(self,n,n_0,T_0,Gamma):\n \n \"\"\" Method temp calculates plasma temperature (T) as function of plasma density (n)\n \n Args:\n n(int or np.ndarray): plasma density at specific (z,r) location in the plume grid\n n_0 (int):Iinitial density of plasma\n T_0 (float): Initial temperature of plasma\n Gamma (int): Dimensionless thermal expansion constant\n \n Returns:\n T (float or np.ndarray): Temperature of plasma at targeted (z,r) grid points in plume\n \n Usage:\n >>> T = Hyperplume().temp(n=0.65,n_0=1,T_0=2.1801714e-19,Gamma=1)\n \n \"\"\"\n \n if Gamma == 1: #Checking expansion model\n \n T = T_0*(n*0 + 1)\n \n else:\n \n T = T_0*((n/n_0)**(Gamma-1))\n \n return T\n \n \n def phi (self,n,n_0,T_0,Gamma,e_charge):\n \n \"\"\"Method phi calculates electric potential (\\phi) as function of plasma density (n)\n \n Args:\n n(int or np.ndarray): plasma density at specific (z,r) location in the plume grid\n n_0 (int):Iinitial density of plasma\n T_0 (float): Initial temperature of plasma\n Gamma (int): Dimensionless thermal expansion constant\n e_charge (float):Electron charge\n \n Returns:\n phi(float or np.ndarray): Electric potential of plasma at (z,r) targeted grid point\n \n Usage:\n >>> phi = Hyperplume().phi(n=0.65,n_0=1,T_0=2.1801714e-19,Gamma=1,e_charge=-1.6e-19)\n \n \"\"\"\n \n if Gamma == 1: #Checking expansion model\n \n phi = (T_0/e_charge)*np.log(n/n_0)\n \n else :\n \n phi = (T_0/e_charge)*(Gamma / ((Gamma - 1)) * ((n/n_0)**(Gamma-1)-1))\n \n return phi\n \n def n(self,n_0,T_0,phi,Gamma,e_charge):\n \n \"\"\"Method n calculates plasma density (n) as function of plasma potential (\\phi)\n \n Args:\n n_0 (int):Iinitial density of plasma\n T_0 (float): Initial temperature of plasma\n Gamma (int): Dimensionless thermal expansion constant\n e_charge (float):Electron charge\n Returns:\n n (float or numpy.ndarray): Pasma density at (z,r) targeted grid point in the plume.\n \n Usage:\n n = Hyperplume.n(n_0=1,T_0=2.1801714e-19,phi=-5.7,Gamma=1,e_charge=-1.6e-19)\n \n \"\"\"\n \n \n \n if Gamma == 1: #Checking expansion model\n \n n = n_0*np.exp(phi*e_charge/T_0)\n \n else:\n \n n = n_0*(((Gamma-1)/Gamma*phi*e_charge/T_0 + 1 )**1/(Gamma-1))\n \n return n\n \n def eta_deriver(self,x,y):\n \n \"\"\"Method eta_derivar calculates the numerical derivatives of the variables along eta, with a\n\n Args: \n x (np.ndarray): represents the derivative step (dx,dy)\n y (np.ndarray): vector to derive with respect to x\n \n Returns:\n y_prime(np.ndarray): derivaive of y over x stored in array format\n \n Usage:\n >>> x = np.array([0,0.5,1,1.2,2,2.3,2.6])\n >>> y = np.array([10,17,23,27,36,40,45])\n >>> dydx = Hyperplume.eta_deriver(x,y)\n \"\"\"\n \n dx = np.gradient(x)\n \n y_prime = np.gradient(y,dx)\n \n return y_prime\n \n def 
plot(self,z=np.array([15,20,25,30]),r=np.array([20,25,30,35]),var_name='n',contour_levels=[0,1,2,3,4,5,6,7,8]):\n \n \"\"\" Hyperplume Class method to plot the contours of important plasma variables along the specified (z,r) plume grid points\n \n Args:\n \n z (int,float, or np.ndarray): new interpolation axial region where plasma variabes are to be calculated and plotted. Must be inside z_grid limits\n r (int,float, or np.ndarray): new interpolation axial region where plasma variabes are to be calculated and plotted. Must be inside z_grid limits \n var_name (str): string containing the name of the variable to be visualized. Options are:\n 'lnn': logarithm of plasma density \n 'u_z': axial plume velocity\n 'u_r':radial plume velocity\n 'T': plasmaTemperature\n 'phi': ambipolar electric field\n 'eta': ion stream lines\n contour_levels (array or of list): contour lables of plasma varialbled at the targets z,r points.\n \n Returns:\n None\n \n Usage:\n >>> Plasma = Hyperplume().SIMPLE_plasma()\n >>> Plume = AEM()\n \n \"\"\"\n \n lnn,u_z,u_r,T,phi,error,eta = self.query(z,r) #Retrievibg plasma variables at z,r gid points \n \n fig = plt.figure()\n \n CE = plt.contour(z,r,eval(var_name),contour_levels) \n plt.title(var_name)\n plt.xlabel(r'$\\ z/R_0 $')\n plt.ylabel(r'$\\ r/R_0 $')\n plt.ylim(0,10)\n plt.clabel(CE,CE.levels,fontsize=6)\n \n plt.savefig(var_name + '.pdf',bbox_inches='tight')\n \n fig.show()\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n " ]
[ [ "numpy.log", "matplotlib.pyplot.clabel", "matplotlib.pyplot.title", "numpy.linspace", "numpy.gradient", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "numpy.exp", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.figure" ] ]
HaydenFaulkner/VidDet
[ "2dbc104a41bf1192a00ffde07695180eab18cea8" ]
[ "models/definitions/flownet/inference.py" ]
[ "import cv2\nimport mxnet as mx\nimport numpy as np\nfrom scipy.misc import imresize\nfrom tqdm import tqdm\n\nfrom flownet import get_flownet\nfrom utils import flow_to_image, crop, normalise\n\ndef process_two_images(model, imgs, ctx=None):\n \"\"\"\n Process two images into one flow image\n Args:\n model: The model to use\n imgs: a list of 2 images\n ctx: the model ctx\n\n Returns:\n\n \"\"\"\n if len(imgs) != 2:\n return None\n if isinstance(imgs[0], str):\n if os.path.exists(imgs[0]):\n imgs[0] = cv2.cvtColor(cv2.imread(files[i]), cv2.COLOR_BGR2RGB)\n else:\n return None\n if isinstance(imgs[1], str):\n if os.path.exists(imgs[1]):\n imgs[1] = cv2.cvtColor(cv2.imread(files[i]), cv2.COLOR_BGR2RGB)\n else:\n return None\n\n imgs = crop(imgs)\n imgs = np.array(imgs)\n imgs = np.moveaxis(imgs, -1, 1)\n imgs = normalise(imgs)\n\n imgs = mx.nd.array(imgs, ctx=ctx)\n imgs = mx.nd.expand_dims(imgs, 0) # add batch axis\n\n flow = model(imgs) # run the model\n\n flow = flow.asnumpy()\n flow = flow.squeeze()\n flow = flow.transpose(1, 2, 0)\n img = flow_to_image(flow)\n img = imresize(img, 4.0) # doing the bilinear interpolation on the img, NOT flow cause was too hard :'(\n\n return img, flow\n\n\ndef process_imagedir(model, input_dir, output_dir=None, ctx=None):\n \"\"\"\n Process a directory of images\n\n Args:\n model:\n input_dir:\n output_dir:\n ctx:\n\n Returns:\n\n \"\"\"\n\n files = []\n for ext in [\".jpg\", \".png\", \".jpeg\", \".JPG\", \".PNG\", \".JPEG\"]:\n files = glob.glob(input_dir + \"/**/*\" + ext, recursive=True)\n if len(files) > 0:\n break\n\n if not len(files) > 0:\n print(\"Couldn't find any files in {}\".format(input_dir))\n return None\n\n files.sort()\n\n for i in tqdm(range(len(files) - 1), desc='Calculating Flow'):\n img, flow = process_two_images(model, files[i:i+2], ctx)\n dir, file = os.path.split(files[i])\n if output_dir is None:\n output_dir = os.path.join(dir, 'flow')\n os.makedirs(output_dir, exists_ok=True)\n cv2.imwrite(os.path.join(output_dir, file), cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\n return output_dir\n\n\ndef process_video(model, input_path, output_path=None, ctx=None):\n \"\"\"\n Process a video into a flow video\n\n Args:\n model:\n input_path:\n output_path:\n ctx:\n\n Returns:\n\n \"\"\"\n capture = cv2.VideoCapture(input_path)\n frames = []\n while_safety = 0\n while len(frames) < 200:# int(capture.get(cv2.CAP_PROP_FRAME_COUNT))-1:\n _, image = capture.read() # read an image from the capture\n\n if while_safety > 500: # break the while if our safety maxs out at 500\n break\n\n if image is None:\n while_safety += 1\n continue\n\n while_safety = 0 # reset the safety count\n frames.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n capture.release()\n\n if len(frames) < 2:\n return None\n\n if output_path is None:\n output_path = input_path[:-4] + '_flow.mp4'\n\n cropped_frames = crop(frames)\n h, w, _= cropped_frames[0].shape\n video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25, (w, h))\n\n for i in tqdm(range(len(frames)-1), desc='Calculating Flow'):\n mx.nd.waitall()\n img, flow = process_two_images(model, frames[i:i+2], ctx)\n video.write(img)\n\n video.release() # release the video\n\n return output_path\n\nif __name__ == '__main__':\n # just for debugging\n\n # save_path = \"models/definitions/flownet/weights/FlowNet2-S_checkpoint.params\"\n save_path = \"models/definitions/flownet/weights/FlowNet2-C_checkpoint.params\"\n\n ctx = mx.gpu(0)\n\n # net = get_flownet('S', pretrained=True, ctx=ctx)\n net = 
get_flownet('C', pretrained=True, ctx=ctx)\n net.hybridize()\n\n input_path = \"/path/to/test.mp4\"\n process_video(net, input_path, ctx=ctx)\n\n print(\"DONE\")\n" ]
[ [ "scipy.misc.imresize", "numpy.array", "numpy.moveaxis" ] ]
tolgadur/Sensor-Placement
[ "ad33477d1fb14052e1a9e58d149d0b8e767ea318" ]
[ "src/sensor_placement.py" ]
[ "#!/usr/bin/python\nimport numpy as np\nimport heapq\nimport pandas as pd\n\n\"\"\" FILE NAME: 'sensor_placement.py'\n DESCRIPTION: This file is implementing the class that will be used for sensor\n positioning according to solution proposed by Krause, Singh and Guestrin (2008).\n\"\"\"\n\nclass SensorPlacement:\n @staticmethod\n def isMonotonic(cov, k, V, S, U):\n \"\"\" This method checks if values in the dataset are monotonic or not. For\n datasets > 2000 observations, non-monotonicity might lead to suboptimal\n results.\n Input:\n - cov: covariance matrix\n - k: number of Sensors to be placed\n - V: indices of all position\n - S: indices of all possible sensor positions\n - U: indices of all impossible sensor positions\n \"\"\"\n A = np.array([])\n for j in range(k):\n S_A = np.setdiff1d(S, A).astype(int)\n for y in S_A:\n AHat = np.setdiff1d(V, np.append(A, [y]))\n condition = SensorPlacement.__conditionalEntropy(cov, y, A) - SensorPlacement.__conditionalEntropy(cov, y, AHat)\n if condition < 0:\n print(condition)\n return False\n return True\n\n @staticmethod\n def __conditionalVariance(cov, y, A):\n \"\"\" This method calculates the conditional variance of y given A. \"\"\"\n var = cov[y, y] - (cov[np.ix_([y], A)] @ np.linalg.inv(cov[np.ix_(A, A)]) @ cov[np.ix_(A, [y])])\n # var = np.absolute(cov[y, y] - (cov[np.ix_([y], A)] @ np.linalg.inv(cov[np.ix_(A, A)]) @ cov[np.ix_(A, [y])]))\n return var[0][0]\n\n @staticmethod\n def __conditionalEntropy(cov, y, A):\n \"\"\" This method calculates the conditional entropy of y given A. \"\"\"\n conditionalVariance = SensorPlacement.__conditionalVariance(cov, y, A)\n return 0.5 * np.log(2*np.pi*conditionalVariance)\n\n @staticmethod\n def __localConditionalEntropy(cov, y, A, epsilon):\n \"\"\" This method calculates the conditional entropy of y given A for\n all values where cov[y, A] > epsilon. \"\"\"\n A_ = SensorPlacement.__localSet(cov, y, A, epsilon)\n return SensorPlacement.__conditionalEntropy(cov, y, A_)\n\n @staticmethod\n def __localConditionalVariance(cov, y, A, epsilon):\n \"\"\" This method calculates the conditional variance of y given A for\n all values where cov[y, A] > epsilon. 
\"\"\"\n A_ = SensorPlacement.__localSet(cov, y, A, epsilon)\n return SensorPlacement.__conditionalVariance(cov, y, A_)\n\n @staticmethod\n def __localSet(cov, y, A, epsilon):\n \"\"\" This method returns the set of points X in S for which K(y*, x) > epsilon.\n Input:\n - cov: covariance matrix\n - S_i: array with all indices of i\n - epsilon: hyperparameter\n \"\"\"\n return [x for x in A if cov[y, x] > epsilon]\n\n @staticmethod\n def naiveSensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):\n \"\"\" This is an implementation of the first approximation method suggested in\n the 'Near-Optimal Sensor Placement' paper.\n Input:\n - cov: covariance matrix\n - k: number of Sensors to be placed\n - V: indices of all position\n - S: indices of all possible sensor positions\n - U: indices of all impossible sensor positions\n \"\"\"\n print('Algorithm is starting for subdomain', subdomain, flush=True)\n A = A\n\n for j in range(k):\n S_A = np.setdiff1d(S, A).astype(int)\n delta = np.array([])\n for y in S_A:\n AHat = np.setdiff1d(V, np.append(A, [y]))\n delta = np.append(delta, SensorPlacement.__conditionalVariance(cov, y, A) / \\\n SensorPlacement.__conditionalVariance(cov, y, AHat))\n y_star = S_A[np.argmax(delta)]\n A = np.append(A, y_star).astype(int)\n print('subdomain ', subdomain, ': ', A, flush=True)\n if subdomain != None:\n output.put((subdomain, 2*A))\n return 2*A\n\n @staticmethod\n def lazySensorPlacement(cov, k, V, S, U, A, subdomain=None, output=None):\n \"\"\" This is an implementation of the second approximation method suggested in\n the 'Near-Optimal Sensor Placement' paper. It uses a priority queue in order\n to reduce the time complexity from O(k*n^4) to O(k*n^3).\n Input:\n - cov: covariance matrix\n - k: number of Sensors to be placed\n - V: indices of all position\n - S: indices of all possible sensor positions\n - U: indices of all impossible sensor positions\n \"\"\"\n print('Algorithm is starting for subdomain', subdomain, flush=True)\n A = A\n\n delta = -1 * np.inf * np.ones((len(S), 1))\n heap = [(delta[i], S[i], -1) for i in range(len(delta))]\n heapq.heapify(heap)\n\n for j in range(k):\n while True:\n delta_star, y_star, current = heapq.heappop(heap)\n if current == j:\n break\n AHat = np.setdiff1d(V, np.append(A, [y_star]))\n criterion = SensorPlacement.__conditionalVariance(cov, y_star, A) / \\\n SensorPlacement.__conditionalVariance(cov, y_star, AHat)\n heapq.heappush(heap, (-1 * criterion, y_star, j))\n\n A = np.append(A, y_star).astype(int)\n print('subdomain ', subdomain, ': ', 2*A, flush=True)\n if subdomain != None:\n output.put((subdomain, 2*A))\n return 2*A\n\n @staticmethod\n def localKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):\n \"\"\" This is an implementation of the third approximation method suggested in\n the 'Near-Optimal Sensor Placement' paper. 
It only considers local kernels\n in order to reduce the time complexity O(k*n).\n Input:\n - cov: covariance matrix\n - k: number of Sensors to be placed\n - V: indices of all position\n - S: indices of all possible sensor positions\n - U: indices of all impossible sensor positions\n \"\"\"\n print('Algorithm is starting for subdomain', subdomain, flush=True)\n A = A\n epsilon = 1e-10\n\n delta = np.array([]); N = S\n for y in S:\n V_y = np.setdiff1d(V, y).astype(int)\n delta = np.append(delta, cov[y, y] / SensorPlacement.__localConditionalVariance(cov, y, V_y, epsilon))\n\n for j in range(k):\n y_star = N[np.argmax(delta)]\n A = np.append(A, y_star).astype(int)\n print('subdomain ', subdomain, ': ', A, flush=True)\n\n N = SensorPlacement.__localSet(cov, y_star, S, epsilon)\n N = np.setdiff1d(S, A).astype(int)\n delta = np.array([])\n for y in N:\n AHat = np.setdiff1d(V, np.append(A, [y]))\n delta = np.append(delta, SensorPlacement.__localConditionalVariance(cov, y, A, epsilon) / \\\n SensorPlacement.__localConditionalVariance(cov, y, AHat, epsilon))\n\n if subdomain != None:\n output.put((subdomain, 2*A))\n return 2*A\n\n @staticmethod\n def lazyLocalKernelPlacement(cov, k, V, S, U, A, subdomain=None, output=None):\n \"\"\" This is a mix between the lazySensorPlacement method and the localKernelPlacement\n method.\n Input:\n - cov: covariance matrix\n - k: number of Sensors to be placed\n - V: indices of all position\n - S: indices of all possible sensor positions\n - U: indices of all impossible sensor positions\n \"\"\"\n print('Algorithm is starting for subdomain', subdomain, flush=True)\n A = A\n epsilon = 1e-10\n\n delta = -1 * np.inf * np.ones((len(S), 1))\n heap = [(delta[i], S[i], -1) for i in range(len(delta))]\n heapq.heapify(heap)\n\n for j in range(k):\n while True:\n delta_star, y_star, current = heapq.heappop(heap)\n if current == j:\n break\n AHat = np.setdiff1d(V, np.append(A, [y_star]))\n criterion = SensorPlacement.__localConditionalVariance(cov, y_star, A, epsilon) / \\\n SensorPlacement.__localConditionalVariance(cov, y_star, AHat, epsilon)\n heapq.heappush(heap, (-1 * criterion, y_star, j))\n\n A = np.append(A, y_star).astype(int)\n print('subdomain ', subdomain, ': ', A, flush=True)\n if subdomain != None:\n output.put((subdomain, 2*A))\n return 2*A\n" ]
[ [ "numpy.log", "numpy.ix_", "numpy.setdiff1d", "numpy.append", "numpy.argmax", "numpy.array" ] ]
tobon/nibabel
[ "ff2b5457207bb5fd6097b08f7f11123dc660fda7", "ff2b5457207bb5fd6097b08f7f11123dc660fda7" ]
[ "nibabel/minc2.py", "nibabel/tests/test_image_load_save.py" ]
[ "# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\" Preliminary MINC2 support\n\nUse with care; I haven't tested this against a wide range of MINC files.\n\nIf you have a file that isn't read correctly, please send an example.\n\nTest reading with something like::\n\n import nibabel as nib\n img = nib.load('my_funny.mnc')\n data = img.get_data()\n print(data.mean())\n print(data.max())\n print(data.min())\n\nand compare against command line output of::\n\n mincstats my_funny.mnc\n\"\"\"\nimport numpy as np\n\nfrom .optpkg import optional_package\nh5py, have_h5py, setup_module = optional_package('h5py')\n\nfrom .minc1 import Minc1File, Minc1Image, MincError\n\n\nclass Hdf5Bunch(object):\n \"\"\" Make object for accessing attributes of variable\n \"\"\"\n def __init__(self, var):\n for name, value in var.attrs.items():\n setattr(self, name, value)\n\n\nclass Minc2File(Minc1File):\n ''' Class to wrap MINC2 format file\n\n Although it has some of the same methods as a ``Header``, we use\n this only when reading a MINC2 file, to pull out useful header\n information, and for the method of reading the data out\n '''\n def __init__(self, mincfile):\n self._mincfile = mincfile\n minc_part = mincfile['minc-2.0']\n # The whole image is the first of the entries in 'image'\n image = minc_part['image']['0']\n self._image = image['image']\n self._dim_names = self._get_dimensions(self._image)\n dimensions = minc_part['dimensions']\n self._dims = [Hdf5Bunch(dimensions[s]) for s in self._dim_names]\n # We don't currently support irregular spacing\n # http://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Dimension_variable_attributes\n for dim in self._dims:\n if dim.spacing != b'regular__':\n raise ValueError('Irregular spacing not supported')\n self._spatial_dims = [name for name in self._dim_names\n if name.endswith('space')]\n self._image_max = image['image-max']\n self._image_min = image['image-min']\n\n def _get_dimensions(self, var):\n # Dimensions for a particular variable\n # Differs for MINC1 and MINC2 - see:\n # http://en.wikibooks.org/wiki/MINC/Reference/MINC2.0_File_Format_Reference#Associating_HDF5_dataspaces_with_MINC_dimensions\n return var.attrs['dimorder'].split(',')\n\n def get_data_dtype(self):\n return self._image.dtype\n\n def get_data_shape(self):\n return self._image.shape\n\n def _get_valid_range(self):\n ''' Return valid range for image data\n\n The valid range can come from the image 'valid_range' or\n failing that, from the data type range\n '''\n ddt = self.get_data_dtype()\n info = np.iinfo(ddt.type)\n try:\n valid_range = self._image.attrs['valid_range']\n except AttributeError:\n valid_range = [info.min, info.max]\n else:\n if valid_range[0] < info.min or valid_range[1] > info.max:\n raise ValueError('Valid range outside input '\n 'data type range')\n return np.asarray(valid_range, dtype=np.float)\n\n def get_scaled_data(self):\n data = np.asarray(self._image)\n return self._normalize(data)\n\n\nclass Minc2Image(Minc1Image):\n ''' Class for MINC2 images\n\n The MINC2 image class uses the default header type, rather than a\n specific MINC header type - and reads the relevant information from\n the MINC file on 
load.\n '''\n # MINC2 does not do compressed whole files\n _compressed_exts = ()\n\n @classmethod\n def from_file_map(klass, file_map):\n holder = file_map['image']\n if holder.filename is None:\n raise MincError('MINC2 needs filename for load')\n minc_file = Minc2File(h5py.File(holder.filename, 'r'))\n affine = minc_file.get_affine()\n if affine.shape != (4, 4):\n raise MincError('Image does not have 3 spatial dimensions')\n data_dtype = minc_file.get_data_dtype()\n shape = minc_file.get_data_shape()\n zooms = minc_file.get_zooms()\n header = klass.header_class(data_dtype, shape, zooms)\n data = klass.ImageArrayProxy(minc_file)\n return klass(data, affine, header, extra=None, file_map=file_map)\n\n\nload = Minc2Image.load\n", "# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n''' Tests for loader function '''\nfrom __future__ import division, print_function, absolute_import\nfrom os.path import join as pjoin, dirname\nimport shutil\nfrom tempfile import mkdtemp\nfrom ..externals.six import BytesIO\n\nimport numpy as np\n\n# If we don't have scipy, then we cannot write SPM format files\ntry:\n import scipy.io\nexcept ImportError:\n have_scipy = False\nelse:\n have_scipy = True\n\n\nfrom .. import analyze as ana\nfrom .. import spm99analyze as spm99\nfrom .. import spm2analyze as spm2\nfrom .. import nifti1 as ni1\nfrom .. import loadsave as nils\nfrom .. import (Nifti1Image, Nifti1Header, Nifti1Pair, Nifti2Image, Nifti2Pair,\n Minc1Image, Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage,\n AnalyzeImage, MGHImage, class_map)\n\nfrom ..tmpdirs import InTemporaryDirectory\n\nfrom ..volumeutils import native_code, swapped_code\n\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nfrom nose.tools import assert_true, assert_equal, assert_raises\n\nDATA_PATH = pjoin(dirname(__file__), 'data')\nMGH_DATA_PATH = pjoin(dirname(__file__), '..', 'freesurfer', 'tests', 'data')\n\n\ndef round_trip(img):\n # round trip a nifti single\n sio = BytesIO()\n img.file_map['image'].fileobj = sio\n img.to_file_map()\n img2 = Nifti1Image.from_file_map(img.file_map)\n return img2\n\n\ndef test_conversion():\n shape = (2, 4, 6)\n affine = np.diag([1, 2, 3, 1])\n for npt in np.float32, np.int16:\n data = np.arange(np.prod(shape), dtype=npt).reshape(shape)\n for r_class_def in class_map.values():\n r_class = r_class_def['class']\n img = r_class(data, affine)\n img.set_data_dtype(npt)\n for w_class_def in class_map.values():\n w_class = w_class_def['class']\n img2 = w_class.from_image(img)\n assert_array_equal(img2.get_data(), data)\n assert_array_equal(img2.get_affine(), affine)\n\n\ndef test_save_load_endian():\n shape = (2, 4, 6)\n affine = np.diag([1, 2, 3, 1])\n data = np.arange(np.prod(shape), dtype='f4').reshape(shape)\n # Native endian image\n img = Nifti1Image(data, affine)\n assert_equal(img.get_header().endianness, native_code)\n img2 = round_trip(img)\n assert_equal(img2.get_header().endianness, native_code)\n assert_array_equal(img2.get_data(), data)\n # byte swapped endian image\n bs_hdr = img.get_header().as_byteswapped()\n bs_img = Nifti1Image(data, affine, bs_hdr)\n assert_equal(bs_img.get_header().endianness, swapped_code)\n # of course the data 
is the same because it's not written to disk\n assert_array_equal(bs_img.get_data(), data)\n # Check converting to another image\n cbs_img = AnalyzeImage.from_image(bs_img)\n # this will make the header native by doing the header conversion\n cbs_hdr = cbs_img.get_header()\n assert_equal(cbs_hdr.endianness, native_code)\n # and the byte order follows it back into another image\n cbs_img2 = Nifti1Image.from_image(cbs_img)\n cbs_hdr2 = cbs_img2.get_header()\n assert_equal(cbs_hdr2.endianness, native_code)\n # Try byteswapped round trip\n bs_img2 = round_trip(bs_img)\n bs_data2 = bs_img2.get_data()\n # now the data dtype was swapped endian, so the read data is too\n assert_equal(bs_data2.dtype.byteorder, swapped_code)\n assert_equal(bs_img2.get_header().endianness, swapped_code)\n assert_array_equal(bs_data2, data)\n # Now mix up byteswapped data and non-byteswapped header\n mixed_img = Nifti1Image(bs_data2, affine)\n assert_equal(mixed_img.get_header().endianness, native_code)\n m_img2 = round_trip(mixed_img)\n assert_equal(m_img2.get_header().endianness, native_code)\n assert_array_equal(m_img2.get_data(), data)\n\n\ndef test_save_load():\n shape = (2, 4, 6)\n npt = np.float32\n data = np.arange(np.prod(shape), dtype=npt).reshape(shape)\n affine = np.diag([1, 2, 3, 1])\n affine[:3,3] = [3,2,1]\n img = ni1.Nifti1Image(data, affine)\n img.set_data_dtype(npt)\n with InTemporaryDirectory() as pth:\n nifn = 'an_image.nii'\n sifn = 'another_image.img'\n ni1.save(img, nifn)\n re_img = nils.load(nifn)\n assert_true(isinstance(re_img, ni1.Nifti1Image))\n assert_array_equal(re_img.get_data(), data)\n assert_array_equal(re_img.get_affine(), affine)\n # These and subsequent del statements are to prevent confusing\n # windows errors when trying to open files or delete the\n # temporary directory. 
\n del re_img\n if have_scipy: # skip we we cannot read .mat files\n spm2.save(img, sifn)\n re_img2 = nils.load(sifn)\n assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage))\n assert_array_equal(re_img2.get_data(), data)\n assert_array_equal(re_img2.get_affine(), affine)\n del re_img2\n spm99.save(img, sifn)\n re_img3 = nils.load(sifn)\n assert_true(isinstance(re_img3,\n spm99.Spm99AnalyzeImage))\n assert_array_equal(re_img3.get_data(), data)\n assert_array_equal(re_img3.get_affine(), affine)\n ni1.save(re_img3, nifn)\n del re_img3\n re_img = nils.load(nifn)\n assert_true(isinstance(re_img, ni1.Nifti1Image))\n assert_array_equal(re_img.get_data(), data)\n assert_array_equal(re_img.get_affine(), affine)\n del re_img\n\n\ndef test_two_to_one():\n # test going from two to one file in save\n shape = (2, 4, 6)\n npt = np.float32\n data = np.arange(np.prod(shape), dtype=npt).reshape(shape)\n affine = np.diag([1, 2, 3, 1])\n affine[:3,3] = [3,2,1]\n # single file format\n img = ni1.Nifti1Image(data, affine)\n assert_equal(img.get_header()['magic'], b'n+1')\n str_io = BytesIO()\n img.file_map['image'].fileobj = str_io\n # check that the single format vox offset is set correctly\n img.to_file_map()\n assert_equal(img.get_header()['magic'], b'n+1')\n assert_equal(img.get_header()['vox_offset'], 352)\n # make a new pair image, with the single image header\n pimg = ni1.Nifti1Pair(data, affine, img.get_header())\n isio = BytesIO()\n hsio = BytesIO()\n pimg.file_map['image'].fileobj = isio\n pimg.file_map['header'].fileobj = hsio\n pimg.to_file_map()\n # the offset remains the same\n assert_equal(pimg.get_header()['magic'], b'ni1')\n assert_equal(pimg.get_header()['vox_offset'], 352)\n assert_array_equal(pimg.get_data(), data)\n # same for from_image, going from single image to pair format\n ana_img = ana.AnalyzeImage.from_image(img)\n assert_equal(ana_img.get_header()['vox_offset'], 352)\n # back to the single image, save it again to a stringio\n str_io = BytesIO()\n img.file_map['image'].fileobj = str_io\n img.to_file_map()\n assert_equal(img.get_header()['vox_offset'], 352)\n aimg = ana.AnalyzeImage.from_image(img)\n assert_equal(aimg.get_header()['vox_offset'], 352)\n aimg = spm99.Spm99AnalyzeImage.from_image(img)\n assert_equal(aimg.get_header()['vox_offset'], 352)\n aimg = spm2.Spm2AnalyzeImage.from_image(img)\n assert_equal(aimg.get_header()['vox_offset'], 352)\n nfimg = ni1.Nifti1Pair.from_image(img)\n assert_equal(nfimg.get_header()['vox_offset'], 352)\n # now set the vox offset directly\n hdr = nfimg.get_header()\n hdr['vox_offset'] = 0\n assert_equal(nfimg.get_header()['vox_offset'], 0)\n # check it gets properly set by the nifti single image\n nfimg = ni1.Nifti1Image.from_image(img)\n assert_equal(nfimg.get_header()['vox_offset'], 352)\n\n\ndef test_negative_load_save():\n shape = (1,2,5)\n data = np.arange(10).reshape(shape) - 10.0\n affine = np.eye(4)\n hdr = ni1.Nifti1Header()\n hdr.set_data_dtype(np.int16)\n img = Nifti1Image(data, affine, hdr)\n str_io = BytesIO()\n img.file_map['image'].fileobj = str_io\n img.to_file_map()\n str_io.seek(0)\n re_img = Nifti1Image.from_file_map(img.file_map)\n assert_array_almost_equal(re_img.get_data(), data, 4)\n\n\ndef test_filename_save():\n # This is to test the logic in the load and save routines, relating\n # extensions to filetypes\n # Tuples of class, ext, loadedclass\n inklass_ext_loadklasses = (\n (Nifti1Image, '.nii', Nifti1Image),\n (Nifti2Image, '.nii', Nifti2Image),\n (Nifti1Pair, '.nii', Nifti1Image),\n (Nifti2Pair, '.nii', 
Nifti2Image),\n (Nifti1Image, '.img', Nifti1Pair),\n (Nifti2Image, '.img', Nifti2Pair),\n (Nifti1Pair, '.img', Nifti1Pair),\n (Nifti2Pair, '.img', Nifti2Pair),\n (Nifti1Image, '.hdr', Nifti1Pair),\n (Nifti2Image, '.hdr', Nifti2Pair),\n (Nifti1Pair, '.hdr', Nifti1Pair),\n (Nifti2Pair, '.hdr', Nifti2Pair),\n (Minc1Image, '.nii', Nifti1Image),\n (Minc1Image, '.img', Nifti1Pair),\n (Spm2AnalyzeImage, '.nii', Nifti1Image),\n (Spm2AnalyzeImage, '.img', Spm2AnalyzeImage),\n (Spm99AnalyzeImage, '.nii', Nifti1Image),\n (Spm99AnalyzeImage, '.img', Spm2AnalyzeImage),\n (AnalyzeImage, '.nii', Nifti1Image),\n (AnalyzeImage, '.img', Spm2AnalyzeImage),\n )\n shape = (2, 4, 6)\n affine = np.diag([1, 2, 3, 1])\n data = np.arange(np.prod(shape), dtype='f4').reshape(shape)\n for inklass, out_ext, loadklass in inklass_ext_loadklasses:\n if not have_scipy:\n # We can't load a SPM analyze type without scipy. These types have\n # a 'mat' file (the type we can't load)\n if ('mat', '.mat') in loadklass.files_types:\n continue\n img = inklass(data, affine)\n try:\n pth = mkdtemp()\n fname = pjoin(pth, 'image' + out_ext)\n nils.save(img, fname)\n rt_img = nils.load(fname)\n assert_array_almost_equal(rt_img.get_data(), data)\n assert_true(type(rt_img) is loadklass)\n # delete image to allow file close. Otherwise windows\n # raises an error when trying to delete the directory\n del rt_img\n finally:\n shutil.rmtree(pth)\n\n\ndef test_analyze_detection():\n # Test detection of Analyze, Nifti1 and Nifti2\n # Algorithm is as described in loadsave:which_analyze_type\n def wat(hdr):\n return nils.which_analyze_type(hdr.binaryblock)\n n1_hdr = Nifti1Header(b'\\0' * 348, check=False)\n assert_equal(wat(n1_hdr), None)\n n1_hdr['sizeof_hdr'] = 540\n assert_equal(wat(n1_hdr), 'nifti2')\n assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti2')\n n1_hdr['sizeof_hdr'] = 348\n assert_equal(wat(n1_hdr), 'analyze')\n assert_equal(wat(n1_hdr.as_byteswapped()), 'analyze')\n n1_hdr['magic'] = b'n+1'\n assert_equal(wat(n1_hdr), 'nifti1')\n assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1')\n n1_hdr['magic'] = b'ni1'\n assert_equal(wat(n1_hdr), 'nifti1')\n assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1')\n # Doesn't matter what magic is if it's not a nifti1 magic\n n1_hdr['magic'] = b'ni2'\n assert_equal(wat(n1_hdr), 'analyze')\n n1_hdr['sizeof_hdr'] = 0\n n1_hdr['magic'] = b''\n assert_equal(wat(n1_hdr), None)\n n1_hdr['magic'] = 'n+1'\n assert_equal(wat(n1_hdr), 'nifti1')\n n1_hdr['magic'] = 'ni1'\n assert_equal(wat(n1_hdr), 'nifti1')\n\n\ndef test_guessed_image_type():\n # Test whether we can guess the image type from example files\n assert_equal(nils.guessed_image_type(\n pjoin(DATA_PATH, 'example4d.nii.gz')),\n Nifti1Image)\n assert_equal(nils.guessed_image_type(\n pjoin(DATA_PATH, 'nifti1.hdr')),\n Nifti1Pair)\n assert_equal(nils.guessed_image_type(\n pjoin(DATA_PATH, 'example_nifti2.nii.gz')),\n Nifti2Image)\n assert_equal(nils.guessed_image_type(\n pjoin(DATA_PATH, 'nifti2.hdr')),\n Nifti2Pair)\n assert_equal(nils.guessed_image_type(\n pjoin(DATA_PATH, 'tiny.mnc')),\n Minc1Image)\n assert_equal(nils.guessed_image_type(\n pjoin(DATA_PATH, 'small.mnc')),\n Minc2Image)\n assert_equal(nils.guessed_image_type(\n pjoin(DATA_PATH, 'test.mgz')),\n MGHImage)\n assert_equal(nils.guessed_image_type(\n pjoin(DATA_PATH, 'analyze.hdr')),\n Spm2AnalyzeImage)\n" ]
[ [ "numpy.asarray", "numpy.iinfo" ], [ "numpy.diag", "numpy.arange", "numpy.eye", "numpy.testing.assert_array_equal", "numpy.prod" ] ]
Jarvis73/DINs
[ "fe967115182a47b9ad1018658cd1be745831e7aa" ]
[ "data_kits/nf_kits.py" ]
[ "# Copyright 2019-2020 Jianwei Zhang All Right Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# =================================================================================\n\nimport pickle\nimport zlib\nfrom pathlib import Path\n\nimport nibabel as nib\nimport numpy as np\nimport pandas as pd\nimport scipy.ndimage as ndi\nimport tqdm\n\nROOT = Path(__file__).parent.parent.parent\nDATA_ROOT = ROOT / \"data/NF\"\n\n\ndef read_nii(file_name, out_dtype=np.int16, special=False, only_header=False):\n nib_vol = nib.load(str(file_name))\n vh = nib_vol.header\n if only_header:\n return vh\n affine = vh.get_best_affine()\n # assert len(np.where(affine[:3, :3].reshape(-1) != 0)[0]) == 3, affine\n trans = np.argmax(np.abs(affine[:3, :3]), axis=1)\n data = nib_vol.get_fdata().astype(out_dtype).transpose(*trans[::-1])\n if special:\n data = np.flip(data, axis=2)\n if affine[0, trans[0]] > 0: # Increase x from Right to Left\n data = np.flip(data, axis=2)\n if affine[1, trans[1]] > 0: # Increase y from Anterior to Posterior\n data = np.flip(data, axis=1)\n if affine[2, trans[2]] < 0: # Increase z from Interior to Superior\n data = np.flip(data, axis=0)\n return vh, data\n\n\ndef write_nii(data, header, out_path, out_dtype=np.int16, special=False, affine=None):\n if header is not None:\n affine = header.get_best_affine()\n # assert len(np.where(affine[:3, :3].reshape(-1) != 0)[0]) == 3, affine\n trans = np.argmax(np.abs(affine[:3, :3]), axis=1)\n trans_bk = [np.argwhere(np.array(trans[::-1]) == i)[0][0] for i in range(3)]\n\n if special:\n data = np.flip(data, axis=2)\n if affine[0, trans[0]] > 0: # Increase x from Right to Left\n data = np.flip(data, axis=2)\n if affine[1, trans[1]] > 0: # Increase y from Anterior to Posterior\n data = np.flip(data, axis=1)\n if affine[2, trans[2]] < 0: # Increase z from Interior to Superior\n data = np.flip(data, axis=0)\n\n out_image = np.transpose(data, trans_bk).astype(out_dtype)\n if header is None and affine is not None:\n out = nib.Nifti1Image(out_image, affine=affine)\n else:\n out = nib.Nifti1Image(out_image, affine=None, header=header)\n nib.save(out, str(out_path))\n\n\ndef load_data(logger):\n data_dir = DATA_ROOT / \"nii_NF\"\n path_list = list(data_dir.glob(\"volume*\"))\n\n logger.info(f\"Loading data ({len(path_list)} examples) ...\")\n cache_path = DATA_ROOT / \"cache.pkl.gz\"\n if cache_path.exists():\n logger.info(f\"Loading data cache from {cache_path}\")\n with cache_path.open(\"rb\") as f:\n data = zlib.decompress(f.read())\n _data_cache = pickle.loads(data)\n logger.info(\"Finished!\")\n return _data_cache\n\n _data_cache = {}\n for path in tqdm.tqdm(path_list):\n pid = path.name.split(\".\")[0].split(\"-\")[-1]\n header, volume = read_nii(path)\n la_path = path.parent / path.name.replace(\"volume\", \"segmentation\")\n _, label = read_nii(la_path)\n assert volume.shape == label.shape, f\"{volume.shape} vs {label.shape}\"\n _data_cache[int(pid)] = {\"im_path\": path.absolute(),\n \"la_path\": la_path.absolute(),\n \"img\": 
volume,\n \"lab\": label.astype(np.uint8),\n \"pos\": np.stack(np.where(label > 0), axis=1),\n \"meta\": header,\n \"lab_rng\": np.unique(label)}\n with cache_path.open(\"wb\") as f:\n logger.info(f\"Saving data cache to {cache_path}\")\n cache_s = pickle.dumps(_data_cache, pickle.HIGHEST_PROTOCOL)\n f.write(zlib.compress(cache_s))\n logger.info(\"Finished!\")\n return _data_cache\n\n\ndef pre_filter_data(data, filter_thresh, connectivity=3, down_sampling=False):\n \"\"\" For object-based segmentation tasks.\n Pre-compute connected components and remove small objects\n \"\"\"\n _pre_filter_cache = None\n\n cache_path = DATA_ROOT / (\"pre-filter.pkl.gz\" if not down_sampling else \"pre-filter_ds.pkl.gz\")\n if cache_path.exists():\n logger.info(f\"Loading pre-filter cache from {cache_path}\")\n with cache_path.open(\"rb\") as f:\n data = zlib.decompress(f.read())\n _pre_filter_cache = pickle.loads(data)\n logger.info(\"Finished!\")\n return _pre_filter_cache\n\n _pre_filter_cache = {}\n for pid in data:\n mask = data[pid][\"lab\"]\n struct = ndi.generate_binary_structure(3, connectivity)\n labeled, n_obj = ndi.label(mask, struct)\n slices = ndi.find_objects(labeled)\n obj_list = []\n for i, sli in enumerate(slices):\n patch = labeled[sli]\n z, y, x = np.where(patch == i + 1)\n if z.shape[0] < filter_thresh:\n patch[z, y, x] = 0\n else:\n obj_list.append(np.stack((z, y, x), axis=1))\n better_label = np.clip(labeled, 0, 1)\n _pre_filter_cache[pid] = {\"lab\": better_label,\n \"obj_list\": obj_list}\n with cache_path.open(\"wb\") as f:\n logger.info(f\"Saving pre-filter cache to {cache_path}\")\n cache_s = pickle.dumps(_pre_filter_cache, pickle.HIGHEST_PROTOCOL)\n f.write(zlib.compress(cache_s))\n logger.info(\"Finished!\")\n return _pre_filter_cache\n\n\ndef load_split(set_key, test_fold):\n if set_key in [\"train\", \"val\", \"eval\"]:\n fold_path = DATA_ROOT / \"split.csv\"\n folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)\n val_split = folds.loc[folds.split == test_fold]\n if set_key != \"train\":\n return val_split\n train_folds = list(range(5))\n train_folds.remove(test_fold)\n train_split = folds.loc[folds.split.isin(train_folds)]\n return train_split\n elif set_key == \"test\":\n fold_path = DATA_ROOT / \"split_test.csv\"\n folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)\n test_split = folds.loc[folds.split == 0]\n return test_split\n elif set_key == \"extra\": # The dataset with 45 cases of 15 patients\n fold_path = DATA_ROOT / \"split_extra.csv\"\n folds = pd.read_csv(str(fold_path)).fillna(0).astype(int)\n test_split = folds.loc[folds.split == 0]\n return test_split\n else:\n raise ValueError(f\"`set_key` supports [train|val|test|extra], got {set_key}\")\n\n\ndef filter_tiny_nf(mask):\n struct2 = ndi.generate_binary_structure(2, 1)\n for i in range(mask.shape[0]):\n res, n_obj = ndi.label(mask[i], struct2)\n size = np.bincount(res.flat)\n for j in np.where(size <= 2)[0]:\n mask[i][res == j] = 0\n\n struct3 = ndi.generate_binary_structure(3, 2)\n res, n_obj = ndi.label(mask, struct3)\n size = np.bincount(res.flat)\n for i in np.where(size <= 5)[0]:\n mask[res == i] = 0\n return mask\n\n\ndef slim_labels(data, logger):\n slim_labels_path = DATA_ROOT / \"slim_labels.pkl.gz\"\n if slim_labels_path.exists():\n logger.info(f\"Loading slimmed label cache from {slim_labels_path}\")\n with slim_labels_path.open(\"rb\") as f:\n new_labels = pickle.loads(zlib.decompress(f.read()))\n for i in data:\n data[i]['slim'] = new_labels[i]\n logger.info(\"Finished!\")\n else:\n 
new_labels = {}\n logger.info(f\"Saving slimmed label cache to {slim_labels_path}\")\n for i, item in data.items():\n new_labels[i] = filter_tiny_nf(np.clip(item['lab'], 0, 1).copy())\n data[i]['slim'] = new_labels[i]\n with slim_labels_path.open(\"wb\") as f:\n f.write(zlib.compress(pickle.dumps(new_labels, pickle.HIGHEST_PROTOCOL)))\n logger.info(\"Finished!\")\n\n return data\n\n\ndef load_test_data_paths():\n data_dir = DATA_ROOT / \"test_NF\"\n path_list = list(data_dir.glob(\"*img.nii.gz\"))\n dataset = {}\n for path in path_list:\n pid = int(path.name.split(\"-\")[0])\n dataset[pid] = {\"img_path\": path, \"lab_path\": path.parent / path.name.replace(\"img\", \"mask\")}\n return dataset\n\n\nextra_name_mapping = {\n \"---Abdomen1__20080620-img.nii.gz\": 0,\n \"---Abdomen1__20101129-img.nii.gz\": 1,\n \"---Abdomen1__20130625-img.nii.gz\": 2,\n \"---Airway1__20031216-img.nii.gz\": 3,\n \"---Airway1__20041020-img.nii.gz\": 4,\n \"---Airway1__20060907-img.nii.gz\": 5,\n \"---Airway2__20080707-img.nii.gz\": 6,\n \"---Airway2__20110124-img.nii.gz\": 7,\n \"---Airway2__20130204-img.nii.gz\": 8,\n \"---Back1__20070330-img.nii.gz\": 9,\n \"---Back1__20081117-img.nii.gz\": 10,\n \"---Back1__20100323-img.nii.gz\": 11,\n \"---Brachial-plexus1__20130205-img.nii.gz\": 12,\n \"---Br-plexus1__20120223-img.nii.gz\": 13,\n \"---Br-plexus1__20120625-img.nii.gz\": 14,\n \"---Chest2__20011227-img.nii.gz\": 15,\n \"---Chest2__20050914-img.nii.gz\": 16,\n \"---Chest2__20080918-img.nii.gz\": 17,\n \"---Chest3__20081222-img.nii.gz\": 18,\n \"---Chest3__20110602-img.nii.gz\": 19,\n \"---Chest3__20131122-img.nii.gz\": 20,\n \"---Face1__20100719-img.nii.gz\": 21,\n \"---Face1__20110418-img.nii.gz\": 22,\n \"---Face1__20120924-img.nii.gz\": 23,\n \"---Leg1__20080714-img.nii.gz\": 24,\n \"---Leg1__20100726-img.nii.gz\": 25,\n \"---Leg1__20110228-img.nii.gz\": 26,\n \"---Neck1__20020726-img.nii.gz\": 27,\n \"---Neck1__20040315-img.nii.gz\": 28,\n \"---Neck1__20050527-img.nii.gz\": 29,\n \"---Orbit1__20030225-img.nii.gz\": 30,\n \"---Orbit1__20050217-img.nii.gz\": 31,\n \"---Orbit1__20061016-img.nii.gz\": 32,\n \"---Orbit2__20090403-img.nii.gz\": 33,\n \"---Orbit2__20121018-img.nii.gz\": 34,\n \"---Orbit2__20140520-img.nii.gz\": 35,\n \"---Pelvis1__20030916-img.nii.gz\": 36,\n \"---Pelvis1__20060109-img.nii.gz\": 37,\n \"---Pelvis1__20100726-img.nii.gz\": 38,\n \"---Pelvis2__20090114-img.nii.gz\": 39,\n \"---Pelvis2__20100112-img.nii.gz\": 40,\n \"---Pelvis2__20120423-img.nii.gz\": 41,\n \"---Thigh1__20071019-img.nii.gz\": 42,\n \"---Thigh1__20100712-img.nii.gz\": 43,\n \"---Thigh1__20120106-img.nii.gz\": 44,\n}\n\n\ndef load_extra_data_paths():\n data_dir = DATA_ROOT / \"NCI_NF1_InaLabeled\"\n path_list = list(data_dir.glob(\"*img.nii.gz\"))\n dataset = {}\n for path in path_list:\n pid = extra_name_mapping[path.name]\n dataset[pid] = {\"img_path\": path, \"lab_path\": path.parent / path.name.replace(\"img\", \"mask\")}\n return dataset\n\n\ndef load_box_csv():\n box_file = DATA_ROOT / \"nf_box.csv\"\n box_df = pd.read_csv(box_file)\n return box_df\n" ]
[ [ "pandas.read_csv", "numpy.abs", "numpy.clip", "numpy.unique", "scipy.ndimage.generate_binary_structure", "numpy.stack", "scipy.ndimage.label", "numpy.bincount", "scipy.ndimage.find_objects", "numpy.transpose", "numpy.array", "numpy.flip", "numpy.where" ] ]
SStroteich/stella-1
[ "104556a07b9736e7c28e6f1bf2f799384732f38b" ]
[ "stellapy/stellapy_old/stella_read.py" ]
[ "import numpy as np\nfrom stella_dirs import *\nfrom scipy.io import netcdf\n#plt.rcParams.update({'font.size': 28})\n#plt.rcParams['lines.linewidth'] = 2\nimport tabCompleter\nfrom tabCompleter import *\nfrom plotbox import *\nfrom aux_functions import *\nfrom os import listdir\nfrom netCDF4 import *\nimport glob \nimport os.path\n\n# ==============================================================\n# Some utils\ndef format1(value):\n return \"%.3e\" % value\ndef format2(value):\n return \"%14.6e\" % value\ndef format3(value):\n return \"%4.2f\" % value\ndef format4(value):\n return \"%6.2f\" % value\ndef format6(value):\n return \"%7.3f\" % value\ndef format5(value):\n return \"%.5e\" % value\ndef format7(value):\n return \"%22.3f\" % value\ndef format8(value):\n return \"%04d\" % value\ndef format9(value):\n return \"%7.5f\" % value\n# Some utils ended\n#===============================================================\n \ndef casestr(case=None):\n # Function that returns the string of the input, which\n # determines the name of the rest of output files.\n\n if case.endswith(\".in\"):\n buff = case.split(\"/\")\n return buff[size(buff)-1].split(\".in\")[0]\n else:\n if size(inputlist(case)) > 1:\n print(\"\\nSpecify the input in the case field, more than one input file found:\\n\")\n print(inputlist(case))\n exit\n elif size(inputlist(case) == 1):\n return inputlist(case)[0].split(\".in\")[0]\n\ndef inputlist_r(case):\n inputs_level_0 = glob.glob(outdir(case)+'/*.in', recursive = True)\n inputs_level_1 = glob.glob(outdir(case)+'/*/*.in', recursive = True)\n return (inputs_level_0+inputs_level_1)\n \ndef inputlist(case, recursive=False):\n # Function that returns all the input file names\n # with extention \".in\"\n inlist = []\n \n if recursive:\n inlist = inputlist_r(case=case)\n else:\n for f in listdir(outdir(case)):\n if f.endswith('.in'):\n if not f.startswith('.'):\n inputname=f\n inlist.append(f)\n \n return inlist\n\ndef outdir(case=None):\n if case.endswith(\".in\"):\n vcase=case.split(\"/\")\n return runsdir()+'/'+ case.replace(\"/\"+vcase[size(vcase)-1], '')\n else:\n return runsdir()+'/'+ case\n\ndef geotxtfile(case=None):\n # It returns the full path of an output file, endind with\n # the string value of \"quant\".\n if os.path.isfile(case):\n return case.split('.in')[0] + '.geometry'\n else:\n return outdir(case) + '/' + casestr(case) + '.geometry'\n\n \ndef outfile(case=None, quant=None):\n # It returns the full path of an output file, endind with\n # the string value of \"quant\".\n if os.path.isfile(case):\n return case.split('.in')[0] + '.' + quant\n else:\n return outdir(case) + '/' + casestr(case) + '.' 
+ quant\n\ndef infile(case=None):\n # infile = input(\"Path to netcdf file: \")\n return outfile(case, quant='out.nc')\n\ndef fluxes_txt(case=None):\n # infile = input(\"Path to netcdf file: \")\n return outfile(case, quant='fluxes')\n\n# ==================================================================\n# Reading variables in the input *.in file\n\ndef torflux(case):\n # get torflux from input file.\n myfile = open(outfile(case, quant='in'))\n content = float(myfile.read().split('torflux')[1].split('\\n')[0].split('=')[1])\n return content\n\n# ==================================================================\n# Translation of quantities in stella_data module by Michael into\n# functions with the run directory (\"case\") as single argument.\ndef read_stella_float(case, var):\n\n import numpy as np\n \n ncfile = netcdf.netcdf_file(infile(case),'r')\n \n try:\n arr = np.copy(ncfile.variables[var][:])\n flag = True\n except KeyError:\n print('INFO: '+var+' not found in netcdf file')\n arr = np.arange(1,dtype=float)\n flag = False\n \n return arr\n\n\ndef read_stella_value(case, var):\n woutfile = infile(case)\n d = Dataset(woutfile, mode='r')\n return d.variables[var][:]\n\ndef kx(case):\n # get kx grid\n # this is the index of the first negative value of kx\n # note stella orders kx as (0, dkx, ..., kx_max, -kx_max, -kx_max+dkx, ..., -dkx)\n ncfile = netcdf.netcdf_file(infile(case),'r')\n kx_stella = np.copy(ncfile.variables['kx'][:])\n nakx = ncfile.dimensions['kx']\n nakx_mid = nakx//2+1\n kx = np.concatenate((kx_stella[nakx_mid:],kx_stella[:nakx_mid]))\n return kx, nakx, nakx_mid\n\ndef kx_stella(case):\n ncfile = netcdf.netcdf_file(infile(case),'r')\n kx_stella = np.copy(ncfile.variables['kx'][:])\n return kx_stella\n\ndef ky(case):\n # get ky grid\n ncfile = netcdf.netcdf_file(infile(case),'r')\n ky = np.copy(ncfile.variables['ky'][:])\n naky = ncfile.dimensions['ky']\n return ky, naky\n\ndef zed(case):\n # get zed grid\n ncfile = netcdf.netcdf_file(infile(case),'r')\n zed = np.copy(ncfile.variables['zed'][:])\n nzed = zed.size\n iz0 = nzed//2+1\n return zed, nzed, iz0\n\ndef time(case):\n # get time grid\n ncfile = netcdf.netcdf_file(infile(case),'r')\n time = np.copy(ncfile.variables['t'][:])\n ntime = time.size\n return time, ntime\n\ndef nspec(case):\n # number of kinetic species\n ncfile = netcdf.netcdf_file(infile(case),'r')\n nspec = ncfile.dimensions['species']\n return nspec\n\ndef geo(case):\n # get geometric quantities\n d = Dataset(infile(case), mode='r')\n ncfile = netcdf.netcdf_file(infile(case),'r')\n bmag = np.copy(ncfile.variables['bmag'][:])\n gradpar = np.copy(ncfile.variables['gradpar'][:])\n gbdrift = np.copy(ncfile.variables['gbdrift'][:])\n gbdrift0 = np.copy(ncfile.variables['gbdrift0'][:])\n cvdrift = np.copy(ncfile.variables['cvdrift'][:])\n cvdrift0 = np.copy(ncfile.variables['cvdrift0'][:])\n gds2 = np.copy(ncfile.variables['gds2'][:])\n gds21 = np.copy(ncfile.variables['gds21'][:])\n gds22 = np.copy(ncfile.variables['gds22'][:])\n shat = float(d.variables['shat'][:])\n \n return bmag, gradpar, gbdrift, gbdrift0, cvdrift, cvdrift0, gds2, gds21, gds22, shat\n\ndef phi2_vs_kxky(case):\n # electrostatic potential averaged over z as function of (ky,kx,t)\n phi2_vs_kxky_stella = read_stella_float(case, 'phi2_vs_kxky')\n\n# phi2_vs_kxky_stella[:,0,0] = 0.0\n# phi2_vs_kxky = np.concatenate((phi2_vs_kxky_stella[:, kx(case)[2]:,:],\\\n# phi2_vs_kxky_stella[:,:kx(case)[2] ,:]),axis=1)\n return phi2_vs_kxky_stella\n\ndef pflux_vs_kxky(case):\n pflux_vs_kxky_stella = 
read_stella_float(case, 'pflx_kxky')\n \n return pflux_vs_kxky_stella\n \ndef vflux_vs_kxky(case):\n vflux_vs_kxky_stella = read_stella_float(case, 'vflx_kxky')\n \n return vflux_vs_kxky_stella\n\ndef qflux_vs_kxky(case):\n qflux_vs_kxky_stella = read_stella_float(case, 'qflx_kxky')\n \n return qflux_vs_kxky_stella\n\ndef density_vs_kxky(case):\n density_vs_kxky_stella = read_stella_float(case, 'density')\n return density_vs_kxky_stella\n\ndef upar_vs_kxky(case):\n upar_vs_kxky_stella = read_stella_float(case, 'upar')\n return upar_vs_kxky_stella\n\ndef temperature_vs_kxky(case):\n temperature_vs_kxky_stella = read_stella_float(case, 'temperature')\n return temperature_vs_kxky_stella\n\ndef phi_vs_t(case):\n # electrostatic potential as a function of (z,kx,ky,t)\n phi_vs_t_stella = read_stella_float(case, 'phi_vs_t')\n return phi_vs_t_stella\n\ndef gvmus(case):\n # |g|^2 averaged over kx, ky, and z\n return read_stella_float(case, 'gvmus')\n\ndef gzvs(case):\n # |g|^2 averaged over kx, ky, and mu\n return read_stella_float(case, 'gzvs')\n \ndef jacob(case):\n # jacobian for transformation to (rho,alpha,z) coordinates\n return read_stella_float(case, 'jacob')\n\ndef jtwist(case):\n # jtwist factor for twist-and-shift BC\n return read_stella_value(case, 'jtwist')\n\ndef grho(case):\n # gradient of normalized radial coordinate rho\n return read_stella_float(case, 'grho')\n\ndef phi2_stella(case):\n # modulus squared of electrostatic potential (averaged over space)\n return read_stella_float(case, 'phi2')\n\ndef es_part_flux(case):\n # time-dependent electrostatic particle flux for each species\n return read_stella_float(case, 'es_part_flux')\n\ndef es_heat_flux(case):\n # electrostatic heat flux\n return read_stella_float(case, 'es_heat_flux')\n\ndef es_mom_flux(case):\n # electrostatic momentum flux\n return read_stella_float(case, 'es_mom_flux')\n\ndef es_energy_exchange(case):\n return read_stella_float(case, 'es_energy_exchange')\n\ndef es_part_by_k(case):\n # time-dependent particle flux for each species as a function of (kx,ky)\n\n es_part_by_k_stella, es_part_by_k_present = \\\n read_stella_float(case, 'es_part_by_k')\n\n if es_part_by_k_present is not True:\n es_part_by_k_stella, es_part_by_k_present = \\\n read_stella_float(case, 'es_part_flux_by_mode')\n \n return es_part_by_k_stella, es_part_by_k_present\n\ndef es_mom_by_k(case):\n # time-dependent momentum flux for each species as a function of (kx,ky)\n es_mom_by_k_stella, es_mom_by_k_present = \\\n read_stella_float(case, 'es_mom_by_k')\n if es_mom_by_k_present is not True:\n es_mom_by_k_stella, es_mom_by_k_present = \\\n read_stella_float(case, 'es_mom_flux_by_mode')\n return es_mom_by_k_stella, es_mom_by_k_present\n\ndef es_energy_exchange_by_k(case):\n es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present = \\\n read_stella_float(case, 'es_energy_exchange_by_k')\n if es_energy_exchange_by_k_present is not True:\n es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present = \\\n read_stella_float(case, 'es_energy_exchange_by_mode')\n return es_energy_exchange_by_k_stella, es_energy_exchange_by_k_present\n\ndef es_energy_exchange_by_ky(case):\n return read_stella_float(case, 'es_energy_exchange_by_ky')\n\ndef vpa(case):\n # parallel velocity grid\n return read_stella_float(case, 'vpa')\n\ndef mu(case):\n # mu grid\n return read_stella_float(case, 'mu')\n\ndef es_part_sym(case):\n # electrostatic particle flux as function of (vpa,z)\n return read_stella_float(case, 'es_part_sym')\n\ndef es_heat_sym(case):\n # 
electrostatic heat flux as function of (vpa,z)\n return read_stella_float(case, 'es_heat_sym')\n\ndef es_mom_sym(case):\n # electrostatic momentum flux as function of (vpa,z)\n es_mom_sym_stella, es_mom_sym_present = read_stella_float(case, 'es_mom_sym')\n if vpa(case)[1] == False:\n es_mom_sym_present = False\n return es_mom_sym_stella, es_mom_sym_present\n\ndef xgrid(case):\n xgrid_stella, xgrid_present = \\\n read_stella_float(case, 'xgrid')\n xgrid = np.concatenate((xgrid_stella[kx_stella(case).shape[0]//2+1:],\\\n xgrid_stella[:kx_stella(case).shape[0]//2+1]))\n return xgrid, xgrid_present\n\ndef dens(case):\n dens=read_stella_float(case, 'dens')\n dens_exp=factormult(dens,1e19)\n return dens_exp, size(dens)\n\ndef upar(case):\n # parallel flow fluctuation (kx,ky,z,t)\n return read_stella_float(case,'upar')\n\ndef temp(case):\n # temperature fluctuation (kx,ky,z,t)\n temp=read_stella_float(case,'temp')\n temp_exp=factormult(temp,1000)\n return temp_exp, size(temp)\n\ndef species(case):\n species=read_stella_float(case,'type_of_species')\n return species, size(species)\n\ndef nprim(case):\n return read_stella_float(case,'fprim')\n\ndef tprim(case):\n return read_stella_float(case,'tprim')\n\ndef charge(case):\n charge=read_stella_float(case,'charge')\n return charge, size(charge)\n\ndef mass(case):\n charge=read_stella_float(case,'mass')\n return charge, size(mass)\n\n# ==================================================================\n" ]
[ [ "numpy.concatenate", "numpy.arange", "numpy.copy" ] ]
RaphaelOlivier/deepspeech.pytorch
[ "eb73ef61807ab01fad3662ad03dfea8fd44439aa" ]
[ "deepspeech_pytorch/validation.py" ]
[ "from abc import ABC, abstractmethod\n\nimport torch\nfrom torch.cuda.amp import autocast\nfrom tqdm import tqdm\n\nfrom deepspeech_pytorch.decoder import Decoder, GreedyDecoder\n\nfrom pytorch_lightning.metrics import Metric\nimport Levenshtein as Lev\n\n\nclass ErrorRate(Metric, ABC):\n def __init__(self,\n decoder: Decoder,\n target_decoder: GreedyDecoder,\n save_output: bool = False,\n dist_sync_on_step: bool = False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.decoder = decoder\n self.target_decoder = target_decoder\n self.save_output = save_output\n\n @abstractmethod\n def calculate_metric(self, transcript, reference):\n raise NotImplementedError\n\n def update(self, preds: torch.Tensor,\n preds_sizes: torch.Tensor,\n targets: torch.Tensor,\n target_sizes: torch.Tensor):\n # unflatten targets\n split_targets = []\n offset = 0\n for size in target_sizes:\n split_targets.append(targets[offset:offset + size])\n offset += size\n decoded_output, _ = self.decoder.decode(preds, preds_sizes)\n target_strings = self.target_decoder.convert_to_strings(split_targets)\n for x in range(len(target_strings)):\n transcript, reference = decoded_output[x][0], target_strings[x][0]\n self.calculate_metric(\n transcript=transcript,\n reference=reference\n )\n\n\nclass CharErrorRate(ErrorRate):\n def __init__(self,\n decoder: Decoder,\n target_decoder: GreedyDecoder,\n save_output: bool = False,\n dist_sync_on_step: bool = False):\n super().__init__(\n decoder=decoder,\n target_decoder=target_decoder,\n save_output=save_output,\n dist_sync_on_step=dist_sync_on_step\n )\n self.decoder = decoder\n self.target_decoder = target_decoder\n self.save_output = save_output\n self.add_state(\"cer\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(\"n_chars\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n\n def calculate_metric(self, transcript, reference):\n cer_inst = self.cer_calc(transcript, reference)\n self.cer += cer_inst\n self.n_chars += len(reference.replace(' ', ''))\n\n def compute(self):\n cer = float(self.cer) / self.n_chars\n return cer.item() * 100\n\n def cer_calc(self, s1, s2):\n \"\"\"\n Computes the Character Error Rate, defined as the edit distance.\n\n Arguments:\n s1 (string): space-separated sentence\n s2 (string): space-separated sentence\n \"\"\"\n s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')\n return Lev.distance(s1, s2)\n\n\nclass WordErrorRate(ErrorRate):\n def __init__(self,\n decoder: Decoder,\n target_decoder: GreedyDecoder,\n save_output: bool = False,\n dist_sync_on_step: bool = False):\n super().__init__(\n decoder=decoder,\n target_decoder=target_decoder,\n save_output=save_output,\n dist_sync_on_step=dist_sync_on_step\n )\n self.decoder = decoder\n self.target_decoder = target_decoder\n self.save_output = save_output\n self.add_state(\"wer\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(\"n_tokens\", default=torch.tensor(0), dist_reduce_fx=\"sum\")\n\n def calculate_metric(self, transcript, reference):\n wer_inst = self.wer_calc(transcript, reference)\n self.wer += wer_inst\n self.n_tokens += len(reference.split())\n\n def compute(self):\n wer = float(self.wer) / self.n_tokens\n return wer.item() * 100\n\n def wer_calc(self, s1, s2):\n \"\"\"\n Computes the Word Error Rate, defined as the edit distance between the\n two provided sentences after tokenizing to words.\n Arguments:\n s1 (string): space-separated sentence\n s2 (string): space-separated sentence\n \"\"\"\n\n # build mapping of words to integers\n b 
= set(s1.split() + s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))\n\n\[email protected]_grad()\ndef run_evaluation(test_loader,\n model,\n decoder: Decoder,\n device: torch.device,\n target_decoder: Decoder,\n precision: int):\n model.eval()\n wer = WordErrorRate(\n decoder=decoder,\n target_decoder=target_decoder\n )\n cer = CharErrorRate(\n decoder=decoder,\n target_decoder=target_decoder\n )\n for i, (batch) in tqdm(enumerate(test_loader), total=len(test_loader)):\n inputs, targets, input_percentages, target_sizes = batch\n input_sizes = input_percentages.mul_(int(inputs.size(3))).int()\n inputs = inputs.to(device)\n with autocast(enabled=precision == 16):\n out, output_sizes = model(inputs, input_sizes)\n decoded_output, _ = decoder.decode(out, output_sizes)\n wer.update(\n preds=out,\n preds_sizes=output_sizes,\n targets=targets,\n target_sizes=target_sizes\n )\n cer.update(\n preds=out,\n preds_sizes=output_sizes,\n targets=targets,\n target_sizes=target_sizes\n )\n return wer.compute(), cer.compute()\n" ]
[ [ "torch.tensor", "torch.no_grad", "torch.cuda.amp.autocast" ] ]
tsarjak/gsoc_code_library
[ "961cea8e0833d28e5c78e7dd06f7c3823b38cbfb" ]
[ "rgbContrast.py" ]
[ "import cv2\nfrom PIL import Image\nimport numpy as np\n\n\n\ndef arrayToImage(img,sizeX,sizeY,saveAs):\n rgbArray = np.zeros((sizeX,sizeY,3),'uint8')\n for i in range(0,sizeX):\n for j in range(0,sizeY):\n for k in range(0,3):\n rgbArray[i,j,k] = img[i,j,k] * 255\n img = Image.fromarray(rgbArray)\n img.save(saveAs)\n\nim = Image.open(\"inImage.jpg\")\nsizeX = im.size[0]\nsizeY = im.size[1]\nphoto = im.load()\nimg = np.zeros((sizeX,sizeY,3),'float')\nfor i in range(0,sizeX):\n for j in range(0,sizeY):\n for k in range(0,3):\n img[i,j,k] = photo[i,j][k]\n img[i,j,k] = ((img[i,j,k])/255)\n\nfactor = 0.4\nfor i in range(0, sizeX):\n for j in range(0,sizeY):\n img[i,j,0] = ((1 - img[i,j,0]) * factor) + img[i,j,0]\n img[i,j,1] = ((1 - img[i,j,1]) * factor) + img[i,j,1]\n\n # Change in blue can be recctified for sure!\n if img[i,j,0] > img[i,j,1] :\n img[i,j,2] = img[i,j,2] - (img[i,j,2] * factor)\n else:\n img[i,j,2] = ((1 - img[i,j,2]) * factor) + img[i,j,2]\n\narrayToImage(img, sizeX, sizeY, \"outImage6.jpg\")\n\n\n'''\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n'''" ]
[ [ "numpy.zeros" ] ]
ffletcherr/FaceLib
[ "fc1b8496f90ba2c6a76bfb8a59e2e2af7a439a63" ]
[ "facelib/InsightFace/models/data/data_pipe.py" ]
[ "from torch.utils.data import Dataset, ConcatDataset, DataLoader\nfrom torchvision import transforms as trans\nfrom torchvision.datasets import ImageFolder\nfrom PIL import ImageFile\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport numpy as np\n\n\ndef de_preprocess(tensor):\n return tensor * 0.5 + 0.5\n\n\ndef get_train_dataset(imgs_folder):\n train_transform = trans.Compose([\n trans.RandomHorizontalFlip(),\n trans.ToTensor(),\n trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n ds = ImageFolder(imgs_folder, train_transform)\n class_num = ds[-1][1] + 1\n return ds, class_num\n\n\ndef get_train_loader(conf):\n if conf.data_mode in ['ms1m', 'concat']:\n ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder / 'imgs')\n print('ms1m loader generated')\n if conf.data_mode in ['vgg', 'concat']:\n vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder / 'imgs')\n print('vgg loader generated')\n if conf.data_mode == 'vgg':\n ds = vgg_ds\n class_num = vgg_class_num\n elif conf.data_mode == 'ms1m':\n ds = ms1m_ds\n class_num = ms1m_class_num\n elif conf.data_mode == 'concat':\n for i, (url, label) in enumerate(vgg_ds.imgs):\n vgg_ds.imgs[i] = (url, label + ms1m_class_num)\n ds = ConcatDataset([ms1m_ds, vgg_ds])\n class_num = vgg_class_num + ms1m_class_num\n elif conf.data_mode == 'emore':\n ds, class_num = get_train_dataset(conf.emore_folder / 'imgs')\n loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=True, pin_memory=conf.pin_memory,\n num_workers=conf.num_workers)\n return loader, class_num\n\n\ndef get_val_data(data_path):\n agedb_30, agedb_30_issame = get_val_pair(data_path, 'agedb_30')\n cfp_fp, cfp_fp_issame = get_val_pair(data_path, 'cfp_fp')\n lfw, lfw_issame = get_val_pair(data_path, 'lfw')\n return agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame\n\n\n" ]
[ [ "torch.utils.data.ConcatDataset", "torch.utils.data.DataLoader" ] ]
vnechaev/QGOpt
[ "697f02d89df67a576cd6953ffdd2db62970727da" ]
[ "examples/MERAOpt.py" ]
[ "import QGOpt.manifolds as m\nfrom tensorflow.python.keras.optimizer_v2 import optimizer_v2 as opt\nimport tensorflow as tf\n\n\ndef adj(A):\n \"\"\"Correct adjoint\n Args:\n A: tf.tensor of shape (..., n, m)\n Returns:\n tf tensor of shape (..., m, n), adjoint matrix\"\"\"\n\n return tf.math.conj(tf.linalg.matrix_transpose(A))\n\n\nclass MERAOpt(opt.OptimizerV2):\n\n def __init__(self,\n name=\"Fast\"):\n \"\"\"Constructs a new MERA inspired optimizer.\n Returns:\n object of class MERAOpt\"\"\"\n\n super(MERAOpt, self).__init__(name)\n\n def _create_slots(self, var_list):\n # MERAOpt does not need slots\n pass\n\n def _resource_apply_dense(self, grad, var):\n\n # Complex version of grad\n complex_grad = m.real_to_complex(grad)\n\n # MERA like update\n _, u, v = tf.linalg.svd(adj(complex_grad))\n var.assign(m.convert.complex_to_real(-v @ adj(u)))\n\n def _resource_apply_sparse(self, grad, var):\n raise NotImplementedError(\"Sparse gradient updates are not supported.\")\n\n def get_config(self):\n config = super(MERAOpt, self).get_config()\n config.update({\n })\n return config\n" ]
[ [ "tensorflow.linalg.matrix_transpose" ] ]
xzry6/openvino_training_extensions
[ "05cb9b30e8220445fcb27988926d88f330091c12" ]
[ "pytorch_toolkit/face_recognition/dump_features.py" ]
[ "\"\"\"\n Copyright (c) 2018 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport sys\nimport argparse\nimport os\nimport os.path as osp\n\nfrom tqdm import tqdm\nimport numpy as np\nimport glog as log\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms as t\n\nfrom scripts.matio import save_mat\nfrom model.common import models_backbones\nfrom datasets.megaface import MegaFace\nfrom datasets.trillion_pairs import TrillionPairs\nfrom utils.utils import load_model_state\nfrom utils.augmentation import ResizeNumpy, NumpyToTensor\n\n\ndef clean_megaface(filenames, features, noises_list_path):\n \"\"\"Filters megaface from outliers\"\"\"\n with open(noises_list_path, 'r') as f:\n noises_list = f.readlines()\n noises_list = [line.strip() for line in noises_list]\n clean_features = np.zeros((features.shape[0], features.shape[1] + 1), dtype=np.float32)\n\n for i, filename in enumerate(tqdm(filenames)):\n clean_features[i, 0: features.shape[1]] = features[i, :]\n for line in noises_list:\n if line in filename:\n clean_features[i, features.shape[1]] = 100.0\n break\n\n return clean_features\n\n\ndef clean_facescrub(filenames, features, noises_list_path):\n \"\"\"Replaces wrong instances of identities from the Facescrub with the centroids of these identities\"\"\"\n clean_feature_size = features.shape[1] + 1\n with open(noises_list_path, 'r') as f:\n noises_list = f.readlines()\n noises_list = [osp.splitext(line.strip())[0] for line in noises_list]\n clean_features = np.zeros((features.shape[0], clean_feature_size), dtype=np.float32)\n\n centroids = {}\n for i, filename in enumerate(tqdm(filenames)):\n clean_features[i, 0: features.shape[1]] = features[i, :]\n id_name = osp.basename(filename).split('_')[0]\n if not id_name in centroids:\n centroids[id_name] = np.zeros(clean_feature_size, dtype=np.float32)\n centroids[id_name] += clean_features[i, :]\n\n for i, file_path in enumerate(tqdm(filenames)):\n filename = osp.basename(file_path)\n for line in noises_list:\n if line in filename.replace(' ', '_'):\n id_name = filename.split('_')[0]\n clean_features[i, :] = centroids[id_name] + np.random.uniform(-0.001, 0.001, clean_feature_size)\n clean_features[i, :] /= np.linalg.norm(clean_features[i, :])\n break\n\n return clean_features\n\n\[email protected]_grad()\ndef main(args):\n input_filenames = []\n output_filenames = []\n input_dir = os.path.abspath(args.input_dir)\n output_dir = os.path.abspath(args.output_dir)\n\n if not args.trillion_format:\n log.info('Reading info...')\n with open(os.path.join(args.input_dir, os.path.basename(args.input_list)), 'r') as f:\n lines = f.readlines()\n\n for line in tqdm(lines):\n info = line.strip().split('|')\n file = info[0].strip()\n filename = os.path.join(input_dir, file)\n\n path, _ = osp.split(filename)\n out_folder = path.replace(input_dir, output_dir)\n if not osp.isdir(out_folder):\n os.makedirs(out_folder)\n\n landmarks = None\n bbox = None\n\n if len(info) > 2:\n 
landmarks = info[1].strip().split(' ')\n landmarks = [float(x) for x in landmarks]\n bbox = info[2].strip().split(' ')\n bbox = [int(float(x)) for x in bbox]\n outname = filename.replace(input_dir, output_dir) + args.file_ending\n input_filenames.append({'path': filename, 'landmarks': landmarks, 'bbox': bbox})\n output_filenames += [outname]\n\n nrof_images = len(input_filenames)\n log.info(\"Total number of images: \", nrof_images)\n dataset = MegaFace(input_filenames)\n else:\n dataset = TrillionPairs(args.input_dir, osp.join(args.input_dir, 'testdata_lmk.txt'), test_mode=True)\n nrof_images = len(dataset)\n\n emb_array = np.zeros((nrof_images, args.embedding_size), dtype=np.float32)\n\n dataset.transform = t.Compose([ResizeNumpy(models_backbones[args.model].get_input_res()),\n NumpyToTensor(switch_rb=True)])\n val_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=5, shuffle=False)\n\n model = models_backbones[args.model](embedding_size=args.embedding_size, feature=True)\n assert args.snap is not None\n log.info('Snapshot ' + args.snap + ' ...')\n log.info('Extracting embeddings ...')\n model = load_model_state(model, args.snap, args.devices[0], eval_state=True)\n model = torch.nn.DataParallel(model, device_ids=args.devices, output_device=args.devices[0])\n\n f_output_filenames = []\n\n with torch.cuda.device(args.devices[0]):\n for i, data in enumerate(tqdm(val_loader), 0):\n idxs, imgs = data['idx'], data['img']\n batch_embeddings = F.normalize(model(imgs), p=2, dim=1).data.cpu().numpy()\n batch_embeddings = batch_embeddings.reshape(batch_embeddings.shape[0], -1)\n path_indices = idxs.data.cpu().numpy()\n\n start_index = i*args.batch_size\n end_index = min((i+1)*args.batch_size, nrof_images)\n assert start_index == path_indices[0]\n assert end_index == path_indices[-1] + 1\n assert emb_array[start_index:end_index, :].shape == batch_embeddings.shape\n emb_array[start_index:end_index, :] = batch_embeddings\n\n if not args.trillion_format:\n for index in path_indices:\n f_output_filenames.append(output_filenames[index])\n\n assert len(output_filenames) == len(output_filenames)\n\n log.info('Extracting features Done.')\n\n if args.trillion_format:\n save_mat(args.file_ending, emb_array)\n else:\n if 'megaface_noises.txt' in args.noises_list:\n log.info('Cleaning Megaface features')\n emb_array = clean_megaface(f_output_filenames, emb_array, args.noises_list)\n elif 'facescrub_noises.txt' in args.noises_list:\n log.info('Cleaning Facescrub features')\n emb_array = clean_facescrub(f_output_filenames, emb_array, args.noises_list)\n else:\n log.info('Megaface features are not cleaned up.')\n\n log.info('Saving features to files...')\n for i in tqdm(range(len(f_output_filenames))):\n save_mat(f_output_filenames[i], emb_array[i, :])\n\n\ndef parse_argument(argv):\n parser = argparse.ArgumentParser(description='Save embeddings to MegaFace features files')\n parser.add_argument('--model', choices=models_backbones.keys(), type=str, default='rmnet', help='Model type.')\n parser.add_argument('input_dir', help='Path to MegaFace Features')\n parser.add_argument('output_dir', help='Path to FaceScrub Features')\n parser.add_argument('--input_list', default='list.txt', type=str, required=False)\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--embedding_size', type=int, default=128)\n parser.add_argument('--devices', type=int, nargs='+', default=[0], help='CUDA devices to use.')\n parser.add_argument('--snap', type=str, required=True, help='Snapshot 
to evaluate.')\n parser.add_argument('--noises_list', type=str, default='', required=False, help='A list of the Megaface or Facescrub noises produced by insightface. \\\n See https://github.com/deepinsight/insightface/blob/master/src/megaface/README.md')\n parser.add_argument('--file_ending', help='Ending appended to original photo files. i.e.\\\n 11084833664_0.jpg_LBP_100x100.bin => _LBP_100x100.bin', default='_rmnet.bin')\n parser.add_argument('--trillion_format', action='store_true')\n return parser.parse_args(argv)\n\nif __name__ == '__main__':\n main(parse_argument(sys.argv[1:]))\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.linalg.norm", "torch.no_grad", "torch.cuda.device", "numpy.random.uniform", "torch.nn.DataParallel", "numpy.zeros" ] ]
zchen088/Cirq
[ "8cf782554adbafed724987de3067de7ca565fa0c" ]
[ "cirq/sim/simulator.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Abstract base classes for different types of simulators.\n\nSimulator types include:\n\n SimulatesSamples: mimics the interface of quantum hardware.\n\n SimulatesAmplitudes: computes amplitudes of desired bitstrings in the\n final state of the simulation.\n\n SimulatesFinalState: allows access to the final state of the simulation.\n\n SimulatesIntermediateState: allows for access to the state of the simulation\n as the simulation iterates through the moments of a cirq.\n\"\"\"\n\nfrom typing import (\n Any,\n Dict,\n Iterator,\n List,\n Sequence,\n Tuple,\n Union,\n Optional,\n TYPE_CHECKING,\n Set,\n cast,\n Callable,\n TypeVar,\n Generic,\n)\n\nimport abc\nimport collections\n\nimport numpy as np\n\nfrom cirq import circuits, ops, protocols, study, value, work\nfrom cirq._compat import deprecated\n\nif TYPE_CHECKING:\n import cirq\n\n\nTStepResult = TypeVar('TStepResult', bound='StepResult')\nTSimulationTrialResult = TypeVar('TSimulationTrialResult', bound='SimulationTrialResult')\nTSimulatorState = TypeVar('TSimulatorState')\n\n\nclass SimulatesSamples(work.Sampler, metaclass=abc.ABCMeta):\n \"\"\"Simulator that mimics running on quantum hardware.\n\n Implementors of this interface should implement the _run method.\n \"\"\"\n\n def run_sweep(\n self,\n program: 'cirq.Circuit',\n params: study.Sweepable,\n repetitions: int = 1,\n ) -> List[study.Result]:\n \"\"\"Runs the supplied Circuit, mimicking quantum hardware.\n\n In contrast to run, this allows for sweeping over different parameter\n values.\n\n Args:\n program: The circuit to simulate.\n params: Parameters to run with the program.\n repetitions: The number of repetitions to simulate.\n\n Returns:\n Result list for this run; one for each possible parameter\n resolver.\n \"\"\"\n if not program.has_measurements():\n raise ValueError(\"Circuit has no measurements to sample.\")\n\n _verify_unique_measurement_keys(program)\n\n trial_results = [] # type: List[study.Result]\n for param_resolver in study.to_resolvers(params):\n measurements = {}\n if repetitions == 0:\n for _, op, _ in program.findall_operations_with_gate_type(ops.MeasurementGate):\n measurements[protocols.measurement_key(op)] = np.empty([0, 1])\n else:\n measurements = self._run(\n circuit=program, param_resolver=param_resolver, repetitions=repetitions\n )\n trial_results.append(\n study.Result.from_single_parameter_set(\n params=param_resolver, measurements=measurements\n )\n )\n return trial_results\n\n @abc.abstractmethod\n def _run(\n self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int\n ) -> Dict[str, np.ndarray]:\n \"\"\"Run a simulation, mimicking quantum hardware.\n\n Args:\n circuit: The circuit to simulate.\n param_resolver: Parameters to run with the program.\n repetitions: Number of times to repeat the run. 
It is expected that\n this is validated greater than zero before calling this method.\n\n Returns:\n A dictionary from measurement gate key to measurement\n results. Measurement results are stored in a 2-dimensional\n numpy array, the first dimension corresponding to the repetition\n and the second to the actual boolean measurement results (ordered\n by the qubits being measured.)\n \"\"\"\n raise NotImplementedError()\n\n\nclass SimulatesAmplitudes(metaclass=abc.ABCMeta):\n \"\"\"Simulator that computes final amplitudes of given bitstrings.\n\n Given a circuit and a list of bitstrings, computes the amplitudes\n of the given bitstrings in the state obtained by applying the circuit\n to the all zeros state. Implementors of this interface should implement\n the compute_amplitudes_sweep method.\n \"\"\"\n\n def compute_amplitudes(\n self,\n program: 'cirq.Circuit',\n bitstrings: Sequence[int],\n param_resolver: 'study.ParamResolverOrSimilarType' = None,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n ) -> Sequence[complex]:\n \"\"\"Computes the desired amplitudes.\n\n The initial state is assumed to be the all zeros state.\n\n Args:\n program: The circuit to simulate.\n bitstrings: The bitstrings whose amplitudes are desired, input\n as an integer array where each integer is formed from measured\n qubit values according to `qubit_order` from most to least\n significant qubit, i.e. in big-endian ordering.\n param_resolver: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n\n Returns:\n List of amplitudes.\n \"\"\"\n return self.compute_amplitudes_sweep(\n program, bitstrings, study.ParamResolver(param_resolver), qubit_order\n )[0]\n\n @abc.abstractmethod\n def compute_amplitudes_sweep(\n self,\n program: 'cirq.Circuit',\n bitstrings: Sequence[int],\n params: study.Sweepable,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n ) -> Sequence[Sequence[complex]]:\n \"\"\"Computes the desired amplitudes.\n\n The initial state is assumed to be the all zeros state.\n\n Args:\n program: The circuit to simulate.\n bitstrings: The bitstrings whose amplitudes are desired, input\n as an integer array where each integer is formed from measured\n qubit values according to `qubit_order` from most to least\n significant qubit, i.e. in big-endian ordering.\n params: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n\n Returns:\n List of lists of amplitudes. 
The outer dimension indexes the\n circuit parameters and the inner dimension indexes the bitstrings.\n \"\"\"\n raise NotImplementedError()\n\n\nclass SimulatesExpectationValues(metaclass=abc.ABCMeta):\n \"\"\"Simulator that computes exact expectation values of observables.\n\n Given a circuit and an observable map, computes exact (to float precision)\n expectation values for each observable at the end of the circuit.\n\n Implementors of this interface should implement the\n simulate_expectation_values_sweep method.\n \"\"\"\n\n def simulate_expectation_values(\n self,\n program: 'cirq.Circuit',\n observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],\n param_resolver: 'study.ParamResolverOrSimilarType' = None,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n permit_terminal_measurements: bool = False,\n ) -> List[float]:\n \"\"\"Simulates the supplied circuit and calculates exact expectation\n values for the given observables on its final state.\n\n This method has no perfect analogy in hardware. Instead compare with\n Sampler.sample_expectation_values, which calculates estimated\n expectation values by sampling multiple times.\n\n Args:\n program: The circuit to simulate.\n observables: An observable or list of observables.\n param_resolver: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. See\n documentation of the implementing class for details.\n permit_terminal_measurements: If the provided circuit ends with\n measurement(s), this method will generate an error unless this\n is set to True. This is meant to prevent measurements from\n ruining expectation value calculations.\n\n Returns:\n A list of expectation values, with the value at index `n`\n corresponding to `observables[n]` from the input.\n\n Raises:\n ValueError if 'program' has terminal measurement(s) and\n 'permit_terminal_measurements' is False.\n \"\"\"\n return self.simulate_expectation_values_sweep(\n program,\n observables,\n study.ParamResolver(param_resolver),\n qubit_order,\n initial_state,\n permit_terminal_measurements,\n )[0]\n\n @abc.abstractmethod\n def simulate_expectation_values_sweep(\n self,\n program: 'cirq.Circuit',\n observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],\n params: 'study.Sweepable',\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n permit_terminal_measurements: bool = False,\n ) -> List[List[float]]:\n \"\"\"Simulates the supplied circuit and calculates exact expectation\n values for the given observables on its final state, sweeping over the\n given params.\n\n This method has no perfect analogy in hardware. Instead compare with\n Sampler.sample_expectation_values, which calculates estimated\n expectation values by sampling multiple times.\n\n Args:\n program: The circuit to simulate.\n observables: An observable or list of observables.\n params: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. 
See\n documentation of the implementing class for details.\n permit_terminal_measurements: If the provided circuit ends in a\n measurement, this method will generate an error unless this\n is set to True. This is meant to prevent measurements from\n ruining expectation value calculations.\n\n Returns:\n A list of expectation-value lists. The outer index determines the\n sweep, and the inner index determines the observable. For instance,\n results[1][3] would select the fourth observable measured in the\n second sweep.\n\n Raises:\n ValueError if 'program' has terminal measurement(s) and\n 'permit_terminal_measurements' is False.\n \"\"\"\n\n\nclass SimulatesFinalState(Generic[TSimulationTrialResult], metaclass=abc.ABCMeta):\n \"\"\"Simulator that allows access to the simulator's final state.\n\n Implementors of this interface should implement the simulate_sweep\n method. This simulator only returns the state of the quantum system\n for the final step of a simulation. This simulator state may be a state\n vector, the density matrix, or another representation, depending on the\n implementation. For simulators that also allow stepping through\n a circuit see `SimulatesIntermediateState`.\n \"\"\"\n\n def simulate(\n self,\n program: 'cirq.Circuit',\n param_resolver: 'study.ParamResolverOrSimilarType' = None,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n ) -> TSimulationTrialResult:\n \"\"\"Simulates the supplied Circuit.\n\n This method returns a result which allows access to the entire\n simulator's final state.\n\n Args:\n program: The circuit to simulate.\n param_resolver: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. See\n documentation of the implementing class for details.\n\n Returns:\n SimulationTrialResults for the simulation. Includes the final state.\n \"\"\"\n return self.simulate_sweep(\n program, study.ParamResolver(param_resolver), qubit_order, initial_state\n )[0]\n\n @abc.abstractmethod\n def simulate_sweep(\n self,\n program: 'cirq.Circuit',\n params: study.Sweepable,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n ) -> List[TSimulationTrialResult]:\n \"\"\"Simulates the supplied Circuit.\n\n This method returns a result which allows access to the entire final\n simulator state. In contrast to simulate, this allows for sweeping\n over different parameter values.\n\n Args:\n program: The circuit to simulate.\n params: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. 
See\n documentation of the implementing class for details.\n\n Returns:\n List of SimulationTrialResults for this run, one for each\n possible parameter resolver.\n \"\"\"\n raise NotImplementedError()\n\n\nclass SimulatesIntermediateState(\n Generic[TStepResult, TSimulationTrialResult, TSimulatorState],\n SimulatesFinalState[TSimulationTrialResult],\n metaclass=abc.ABCMeta,\n):\n \"\"\"A SimulatesFinalState that simulates a circuit by moments.\n\n Whereas a general SimulatesFinalState may return the entire simulator\n state at the end of a circuit, a SimulatesIntermediateState can\n simulate stepping through the moments of a circuit.\n\n Implementors of this interface should implement the _base_iterator\n method.\n\n Note that state here refers to simulator state, which is not necessarily\n a state vector.\n \"\"\"\n\n def simulate_sweep(\n self,\n program: 'cirq.Circuit',\n params: study.Sweepable,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n ) -> List[TSimulationTrialResult]:\n \"\"\"Simulates the supplied Circuit.\n\n This method returns a result which allows access to the entire\n state vector. In contrast to simulate, this allows for sweeping\n over different parameter values.\n\n Args:\n program: The circuit to simulate.\n params: Parameters to run with the program.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. See\n documentation of the implementing class for details.\n\n Returns:\n List of SimulationTrialResults for this run, one for each\n possible parameter resolver.\n \"\"\"\n trial_results = []\n qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)\n for param_resolver in study.to_resolvers(params):\n all_step_results = self.simulate_moment_steps(\n program, param_resolver, qubit_order, initial_state\n )\n measurements = {} # type: Dict[str, np.ndarray]\n for step_result in all_step_results:\n for k, v in step_result.measurements.items():\n measurements[k] = np.array(v, dtype=np.uint8)\n trial_results.append(\n self._create_simulator_trial_result(\n params=param_resolver,\n measurements=measurements,\n final_simulator_state=step_result._simulator_state(),\n )\n )\n return trial_results\n\n def simulate_moment_steps(\n self,\n circuit: circuits.Circuit,\n param_resolver: 'study.ParamResolverOrSimilarType' = None,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n initial_state: Any = None,\n ) -> Iterator[TStepResult]:\n \"\"\"Returns an iterator of StepResults for each moment simulated.\n\n If the circuit being simulated is empty, a single step result should\n be returned with the state being set to the initial state.\n\n Args:\n circuit: The Circuit to simulate.\n param_resolver: A ParamResolver for determining values of Symbols.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. 
See\n documentation of the implementing class for details.\n\n Returns:\n Iterator that steps through the simulation, simulating each\n moment and returning a StepResult for each moment.\n \"\"\"\n param_resolver = study.ParamResolver(param_resolver)\n resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)\n check_all_resolved(resolved_circuit)\n actual_initial_state = 0 if initial_state is None else initial_state\n return self._base_iterator(resolved_circuit, qubit_order, actual_initial_state)\n\n @deprecated(deadline='v0.11', fix='Override _base_iterator instead')\n def _simulator_iterator(\n self,\n circuit: circuits.Circuit,\n param_resolver: study.ParamResolver,\n qubit_order: ops.QubitOrderOrList,\n initial_state: Any,\n ) -> Iterator[TStepResult]:\n \"\"\"Iterator over StepResult from Moments of a Circuit.\n\n If the initial state is an int, the state is set to the computational\n basis state corresponding to this state. Otherwise if the initial\n state is a np.ndarray it is the full initial state, either a pure state\n or the full density matrix. If it is the pure state it must be the\n correct size, be normalized (an L2 norm of 1), and be safely castable\n to an appropriate dtype for the simulator. If it is a mixed state\n it must be correctly sized and positive semidefinite with trace one.\n\n Args:\n circuit: The circuit to simulate.\n param_resolver: A ParamResolver for determining values of\n Symbols.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. See\n documentation of the implementing class for details.\n\n Yields:\n StepResults from simulating a Moment of the Circuit.\n \"\"\"\n return self.simulate_moment_steps(circuit, param_resolver, qubit_order, initial_state)\n\n @abc.abstractmethod\n def _base_iterator(\n self,\n circuit: circuits.Circuit,\n qubit_order: ops.QubitOrderOrList,\n initial_state: Any,\n ) -> Iterator[TStepResult]:\n \"\"\"Iterator over StepResult from Moments of a Circuit.\n\n Args:\n circuit: The circuit to simulate.\n param_resolver: A ParamResolver for determining values of\n Symbols.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation. The form of\n this state depends on the simulation implementation. 
See\n documentation of the implementing class for details.\n\n Yields:\n StepResults from simulating a Moment of the Circuit.\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def _create_simulator_trial_result(\n self,\n params: study.ParamResolver,\n measurements: Dict[str, np.ndarray],\n final_simulator_state: TSimulatorState,\n ) -> TSimulationTrialResult:\n \"\"\"This method can be implemented to create a trial result.\n\n Args:\n params: The ParamResolver for this trial.\n measurements: The measurement results for this trial.\n final_simulator_state: The final state of the simulator for the\n StepResult.\n\n Returns:\n The SimulationTrialResult.\n \"\"\"\n raise NotImplementedError()\n\n\nclass StepResult(Generic[TSimulatorState], metaclass=abc.ABCMeta):\n \"\"\"Results of a step of a SimulatesIntermediateState.\n\n Attributes:\n measurements: A dictionary from measurement gate key to measurement\n results, ordered by the qubits that the measurement operates on.\n \"\"\"\n\n def __init__(self, measurements: Optional[Dict[str, List[int]]] = None) -> None:\n self.measurements = measurements or collections.defaultdict(list)\n\n @abc.abstractmethod\n def _simulator_state(self) -> TSimulatorState:\n \"\"\"Returns the simulator state of the simulator after this step.\n\n This method starts with an underscore to indicate that it is private.\n To access public state, see public methods on StepResult.\n\n The form of the simulator_state depends on the implementation of the\n simulation,see documentation for the implementing class for the form of\n details.\n \"\"\"\n\n @abc.abstractmethod\n def sample(\n self,\n qubits: List[ops.Qid],\n repetitions: int = 1,\n seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,\n ) -> np.ndarray:\n \"\"\"Samples from the system at this point in the computation.\n\n Note that this does not collapse the state vector.\n\n Args:\n qubits: The qubits to be sampled in an order that influence the\n returned measurement results.\n repetitions: The number of samples to take.\n seed: A seed for the pseudorandom number generator.\n\n Returns:\n Measurement results with True corresponding to the ``|1⟩`` state.\n The outer list is for repetitions, and the inner corresponds to\n measurements ordered by the supplied qubits. These lists\n are wrapped as an numpy ndarray.\n \"\"\"\n raise NotImplementedError()\n\n def sample_measurement_ops(\n self,\n measurement_ops: List[ops.GateOperation],\n repetitions: int = 1,\n seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,\n ) -> Dict[str, np.ndarray]:\n \"\"\"Samples from the system at this point in the computation.\n\n Note that this does not collapse the state vector.\n\n In contrast to `sample` which samples qubits, this takes a list of\n `cirq.GateOperation` instances whose gates are `cirq.MeasurementGate`\n instances and then returns a mapping from the key in the measurement\n gate to the resulting bit strings. Different measurement operations must\n not act on the same qubits.\n\n Args:\n measurement_ops: `GateOperation` instances whose gates are\n `MeasurementGate` instances to be sampled form.\n repetitions: The number of samples to take.\n seed: A seed for the pseudorandom number generator.\n\n Returns: A dictionary from measurement gate key to measurement\n results. 
Measurement results are stored in a 2-dimensional\n numpy array, the first dimension corresponding to the repetition\n and the second to the actual boolean measurement results (ordered\n by the qubits being measured.)\n\n Raises:\n ValueError: If the operation's gates are not `MeasurementGate`\n instances or a qubit is acted upon multiple times by different\n operations from `measurement_ops`.\n \"\"\"\n\n # Sanity checks.\n seen_measurement_keys: Set[str] = set()\n for op in measurement_ops:\n gate = op.gate\n if not isinstance(gate, ops.MeasurementGate):\n raise ValueError(f'{op.gate} was not a MeasurementGate')\n key = protocols.measurement_key(gate)\n if key in seen_measurement_keys:\n raise ValueError(f'Duplicate MeasurementGate with key {key}')\n seen_measurement_keys.add(key)\n\n # Find measured qubits, ensuring a consistent ordering.\n measured_qubits = []\n seen_qubits: Set[cirq.Qid] = set()\n for op in measurement_ops:\n for q in op.qubits:\n if q not in seen_qubits:\n seen_qubits.add(q)\n measured_qubits.append(q)\n\n # Perform whole-system sampling of the measured qubits.\n indexed_sample = self.sample(measured_qubits, repetitions, seed=seed)\n\n # Extract results for each measurement.\n results: Dict[str, np.ndarray] = {}\n qubits_to_index = {q: i for i, q in enumerate(measured_qubits)}\n for op in measurement_ops:\n gate = cast(ops.MeasurementGate, op.gate)\n out = np.zeros(shape=(repetitions, len(op.qubits)), dtype=np.int8)\n inv_mask = gate.full_invert_mask()\n for i, q in enumerate(op.qubits):\n out[:, i] = indexed_sample[:, qubits_to_index[q]]\n if inv_mask[i]:\n out[:, i] ^= out[:, i] < 2\n results[gate.key] = out\n\n return results\n\n\[email protected]_equality(unhashable=True)\nclass SimulationTrialResult:\n \"\"\"Results of a simulation by a SimulatesFinalState.\n\n Unlike Result these results contain the final simulator_state of the\n system. This simulator_state is dependent on the simulation implementation\n and may be, for example, the state vector or the density matrix of the\n system.\n\n Attributes:\n params: A ParamResolver of settings used for this result.\n measurements: A dictionary from measurement gate key to measurement\n results. Measurement results are a numpy ndarray of actual boolean\n measurement results (ordered by the qubits acted on by the\n measurement gate.)\n \"\"\"\n\n def __init__(\n self,\n params: study.ParamResolver,\n measurements: Dict[str, np.ndarray],\n final_simulator_state: Any,\n ) -> None:\n self.params = params\n self.measurements = measurements\n self._final_simulator_state = final_simulator_state\n\n def __repr__(self) -> str:\n return (\n f'cirq.SimulationTrialResult(params={self.params!r}, '\n f'measurements={self.measurements!r}, '\n f'final_simulator_state={self._final_simulator_state!r})'\n )\n\n def __str__(self) -> str:\n def bitstring(vals):\n separator = ' ' if np.max(vals) >= 10 else ''\n return separator.join(str(int(v)) for v in vals)\n\n results = sorted([(key, bitstring(val)) for key, val in self.measurements.items()])\n if not results:\n return '(no measurements)'\n return ' '.join([f'{key}={val}' for key, val in results])\n\n def _repr_pretty_(self, p: Any, cycle: bool) -> None:\n \"\"\"Text output in Jupyter.\"\"\"\n if cycle:\n # There should never be a cycle. 
This is just in case.\n p.text('SimulationTrialResult(...)')\n else:\n p.text(str(self))\n\n def _value_equality_values_(self) -> Any:\n measurements = {k: v.tolist() for k, v in sorted(self.measurements.items())}\n return (self.params, measurements, self._final_simulator_state)\n\n @property\n def qubit_map(self) -> Dict[ops.Qid, int]:\n \"\"\"A map from Qid to index used to define the ordering of the basis in\n the result.\n \"\"\"\n return self._final_simulator_state.qubit_map\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return _qubit_map_to_shape(self.qubit_map)\n\n\ndef _qubit_map_to_shape(qubit_map: Dict[ops.Qid, int]) -> Tuple[int, ...]:\n qid_shape: List[int] = [-1] * len(qubit_map)\n try:\n for q, i in qubit_map.items():\n qid_shape[i] = q.dimension\n except IndexError:\n raise ValueError(f'Invalid qubit_map. Qubit index out of bounds. Map is <{qubit_map!r}>.')\n if -1 in qid_shape:\n raise ValueError(f'Invalid qubit_map. Duplicate qubit index. Map is <{qubit_map!r}>.')\n return tuple(qid_shape)\n\n\ndef _verify_unique_measurement_keys(circuit: circuits.Circuit):\n result = collections.Counter(\n key for op in ops.flatten_op_tree(iter(circuit)) for key in protocols.measurement_keys(op)\n )\n if result:\n duplicates = [k for k, v in result.most_common() if v > 1]\n if duplicates:\n raise ValueError(f\"Measurement key {','.join(duplicates)} repeated\")\n\n\ndef check_all_resolved(circuit):\n \"\"\"Raises if the circuit contains unresolved symbols.\"\"\"\n if protocols.is_parameterized(circuit):\n unresolved = [op for moment in circuit for op in moment if protocols.is_parameterized(op)]\n raise ValueError(\n 'Circuit contains ops whose symbols were not specified in '\n 'parameter sweep. Ops: {}'.format(unresolved)\n )\n\n\ndef split_into_matching_protocol_then_general(\n circuit: 'cirq.Circuit',\n predicate: Callable[['cirq.Operation'], bool],\n) -> Tuple['cirq.Circuit', 'cirq.Circuit']:\n \"\"\"Splits the circuit into a matching prefix and non-matching suffix.\n\n The splitting happens in a per-qubit fashion. A non-matching operation on\n qubit A will cause later operations on A to be part of the non-matching\n suffix, but later operations on other qubits will continue to be put into\n the matching part (as long as those qubits have had no non-matching operation\n up to that point).\n \"\"\"\n blocked_qubits: Set[cirq.Qid] = set()\n matching_prefix = circuits.Circuit()\n general_suffix = circuits.Circuit()\n for moment in circuit:\n matching_part = []\n general_part = []\n for op in moment:\n qs = set(op.qubits)\n if not predicate(op) or not qs.isdisjoint(blocked_qubits):\n blocked_qubits |= qs\n\n if qs.isdisjoint(blocked_qubits):\n matching_part.append(op)\n else:\n general_part.append(op)\n if matching_part:\n matching_prefix.append(ops.Moment(matching_part))\n if general_part:\n general_suffix.append(ops.Moment(general_part))\n return matching_prefix, general_suffix\n" ]
[ [ "numpy.max", "numpy.array", "numpy.empty" ] ]
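The record above pairs Cirq's abstract simulator module with the numpy calls detected in it (numpy.max, numpy.array, numpy.empty). As a minimal, self-contained sketch of how those three calls behave in the measurement-results setting that module works with — this snippet is an illustration only, assumes nothing beyond numpy, and is not part of the stored file:

    import numpy as np

    # Zero repetitions of a one-qubit measurement: an empty (0, 1) result array,
    # mirroring the repetitions == 0 branch of SimulatesSamples.run_sweep.
    no_reps = np.empty([0, 1])

    # Three repetitions of a two-qubit measurement, one row of bit values per repetition.
    samples = np.array([[0, 1], [1, 1], [0, 0]], dtype=np.uint8)

    # SimulationTrialResult.__str__ uses np.max to decide whether measured values
    # need a separating space (only when some value is two digits or more).
    needs_separator = np.max(samples) >= 10
    print(no_reps.shape, samples.shape, needs_separator)  # (0, 1) (3, 2) False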
xdralex/pioneer
[ "1fb9ea947d1b1cc2eb1f27bc4e8a7f206019b607", "1fb9ea947d1b1cc2eb1f27bc4e8a7f206019b607" ]
[ "pioneer/temp/mujoco_test.py", "pioneer/launch/pioneer_knm_train.py" ]
[ "import mujoco_py\nimport numpy as np\nfrom gym import spaces\n\nmodel = mujoco_py.load_model_from_path('pioneer/envs/assets/pioneer2.xml')\nsim = mujoco_py.MjSim(model)\n\nprint(f'timestep: {model.opt.timestep}')\n\nbounds = model.jnt_range.copy().astype(np.float32)\nlow, high = bounds.T\nposition_space = spaces.Box(low=low, high=high, dtype=np.float32)\nprint(f'bounds: {bounds}')\n\nprint(f'nq={model.nq}, nv={model.nv}')\n\na0 = sim.get_state()\nprint(f'qpos={a0.qpos}, nv={a0.qvel}')\n\na1 = mujoco_py.MjSimState(a0.time, a0.qpos, [0.2, -0.2], a0.act, a0.udd_state)\nsim.set_state(a1)\n\nsim.step()\nsim.forward()\n\nprint(sim.data.qpos.flat[:])\nprint(sim.data.qvel.flat[:2])\n\nexit(0)\n\n#\n# print(position_space.sample())\n#\n# sim.step()\n#\n# print(f\"{sim.data.get_body_xpos('pointer')}\")\n#\n# a0 = sim.get_state()\n# print(a0)\n#\n# a1 = mujoco_py.MjSimState(a0.time, -1.0, 0.0, a0.act, a0.udd_state)\n# print(a1)\n# sim.set_state(a1)\n#\n# bounds = model.actuator_ctrlrange.copy().astype(np.float32)\n# print(bounds)\n# print(sim.data.ctrl)\n#\n# # sim.data.ctrl[:] = [10.0]\n#\n# sim.step()\n# sim.forward()\n\n# a1 = mujoco_py.MjSimState(a0.time, 0.0, 1.0, a0.act, a0.udd_state)\n# sim.set_state(a1)\n#\n# sim.step()\n# sim.forward()\n#\n\nviewer = mujoco_py.mjviewer.MjViewer(sim)\n\nDEFAULT_CAMERA_CONFIG = {\n 'trackbodyid': 0,\n 'distance': 20.0,\n 'lookat': np.array((0.0, 0.0, 0.0)),\n 'elevation': -35.0,\n 'azimuth': 135.0\n}\n\nfor key, value in DEFAULT_CAMERA_CONFIG.items():\n if isinstance(value, np.ndarray):\n getattr(viewer.cam, key)[:] = value\n else:\n setattr(viewer.cam, key, value)\n\nwhile True:\n sim.step()\n viewer.render()\n # print(f'{sim.get_state()} - {sim.data.get_body_xpos(\"pointer\")}')\n", "from typing import Any, Dict\n\nimport ray\nfrom gym.wrappers import TimeLimit\nfrom ray import tune\nfrom ray.tune.registry import register_env\nimport pandas as pd\nimport numpy as np\n\nfrom pioneer.envs.pioneer import PioneerKinematicConfig\nfrom pioneer.envs.pioneer import PioneerKinematicEnv\n\n\ndef train(results_dir: str,\n checkpoint_freq: int,\n num_samples: int,\n num_workers: int,\n monitor: bool) -> pd.DataFrame:\n\n def prepare_env(env_config: Dict[str, Any]):\n pioneer_config = PioneerKinematicConfig(\n award_potential_slope=float(env_config['award_potential_slope']),\n award_done=float(env_config['award_done']),\n penalty_step=float(env_config['penalty_step']),\n )\n pioneer_env = PioneerKinematicEnv(pioneer_config=pioneer_config)\n return TimeLimit(pioneer_env, max_episode_steps=500)\n\n register_env('Pioneer-v1', prepare_env)\n ray.init(webui_host='0.0.0.0')\n\n def entropy_coeff_schedule(min_start_entropy: float,\n max_start_entropy: float,\n decay_steps: int,\n base: float = 10):\n\n logmin = np.log(min_start_entropy) / np.log(base)\n logmax = np.log(max_start_entropy) / np.log(base)\n\n x = base ** (np.random.uniform(logmin, logmax))\n return [(0, x), (decay_steps, 0)]\n\n results = tune.run('PPO',\n num_samples=num_samples,\n config={\n 'env': 'Pioneer-v1',\n 'framework': 'torch',\n 'num_gpus': 0,\n 'num_workers': num_workers,\n 'log_level': 'INFO',\n 'monitor': monitor,\n\n 'env_config': {\n 'award_potential_slope': 10.0,\n 'award_done': 5.0,\n 'penalty_step': 1 / 100\n },\n\n 'model': {\n 'fcnet_hiddens': [256, 256]\n },\n 'train_batch_size': 8000,\n 'entropy_coeff_schedule': tune.sample_from(lambda _: entropy_coeff_schedule(1e-3, 1e-1, 1000000)),\n 'lr': 2e-5,\n 'num_sgd_iter': 20,\n 'observation_filter': 'ConcurrentMeanStdFilter'\n },\n stop={\n 
\"training_iteration\": 1000\n },\n local_dir=results_dir,\n checkpoint_freq=checkpoint_freq,\n checkpoint_at_end=True)\n\n ray.shutdown()\n return results.dataframe()\n\n\nif __name__ == '__main__':\n train(results_dir='~/ray_results',\n checkpoint_freq=10,\n num_samples=1,\n num_workers=4,\n monitor=True)\n" ]
[ [ "numpy.array" ], [ "numpy.random.uniform", "numpy.log" ] ]
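The two API lists above line up with the two files in this record: the only extracted call in the mujoco test script is the numpy.array that builds the camera lookat point, while the training launcher draws its starting entropy coefficient log-uniformly with numpy.log and numpy.random.uniform. A self-contained sketch of that log-uniform draw and the decay schedule it feeds — illustrative only; the helper name log_uniform is invented here and does not come from the repository:

    import numpy as np

    def log_uniform(low: float, high: float, base: float = 10.0) -> float:
        # Sample uniformly in log space, so every decade between low and high
        # is covered with equal probability, then map back to the original scale.
        logmin = np.log(low) / np.log(base)
        logmax = np.log(high) / np.log(base)
        return float(base ** np.random.uniform(logmin, logmax))

    start_entropy = log_uniform(1e-3, 1e-1)
    # The launcher pairs the sampled start value with a linear decay to zero.
    schedule = [(0, start_entropy), (1_000_000, 0.0)]
    print(schedule)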
Zac-hills/d3m-primitives
[ "1829fc98042dddfcbee3cfbbb8cb75dd452f1e8d" ]
[ "kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py" ]
[ "import os.path\nfrom typing import Sequence, Optional, Dict\n\nimport numpy as np\nimport pandas as pd\nfrom nk_sent2vec import Sent2Vec as _Sent2Vec\nfrom d3m import container, utils\nfrom d3m.primitive_interfaces.transformer import TransformerPrimitiveBase\nfrom d3m.primitive_interfaces.base import CallResult\nfrom d3m.container import DataFrame as d3m_DataFrame\nfrom d3m.metadata import hyperparams, base as metadata_base, params\n\n__author__ = \"Distil\"\n__version__ = \"1.3.0\"\n__contact__ = \"mailto:[email protected]\"\n\nInputs = container.pandas.DataFrame\nOutputs = container.pandas.DataFrame\n\n\nclass Hyperparams(hyperparams.Hyperparams):\n use_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[int](-1),\n default=(),\n semantic_types=[\n \"https://metadata.datadrivendiscovery.org/types/ControlParameter\"\n ],\n description=\"A set of column indices to force primitive to operate on. If any specified \\\n column cannot be parsed, it is skipped.\",\n )\n\n\nclass Sent2VecPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):\n \"\"\"\n This primitive produces numerical representations of text data using a model\n that was pre-trained on English Twitter bi-grams.\n \"\"\"\n\n metadata = metadata_base.PrimitiveMetadata(\n {\n \"id\": \"cf450079-9333-4a3f-aed4-b77a4e8c7be7\",\n \"version\": __version__,\n \"name\": \"sent2vec_wrapper\",\n \"keywords\": [\"Sent2Vec\", \"Embedding\", \"NLP\", \"Natural Language Processing\"],\n \"source\": {\n \"name\": __author__,\n \"contact\": __contact__,\n \"uris\": [\"https://github.com/kungfuai/d3m-primitives\"],\n },\n \"installation\": [\n {\"type\": \"PIP\", \"package\": \"cython\", \"version\": \"0.29.16\"},\n {\n \"type\": metadata_base.PrimitiveInstallationType.PIP,\n \"package_uri\": \"git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives\".format(\n git_commit=utils.current_git_commit(os.path.dirname(__file__)),\n ),\n },\n {\n \"type\": \"FILE\",\n \"key\": \"sent2vec_model\",\n \"file_uri\": \"http://public.datadrivendiscovery.org/twitter_bigrams.bin\",\n \"file_digest\": \"9e8ccfea2aaa4435ca61b05b11b60e1a096648d56fff76df984709339f423dd6\",\n },\n ],\n \"python_path\": \"d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec\",\n \"algorithm_types\": [metadata_base.PrimitiveAlgorithmType.VECTORIZATION],\n \"primitive_family\": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,\n }\n )\n\n # class instance to avoid unnecessary re-init on subsequent produce calls\n _vectorizer: Optional[_Sent2Vec] = None\n\n def __init__(\n self,\n *,\n hyperparams: Hyperparams,\n random_seed: int = 0,\n volumes: Dict[str, str] = None\n ) -> None:\n super().__init__(\n hyperparams=hyperparams, random_seed=random_seed, volumes=volumes\n )\n\n self.volumes = volumes\n\n def produce(\n self, *, inputs: Inputs, timeout: float = None, iterations: int = None\n ) -> CallResult[Outputs]:\n \"\"\"\n Produce numerical representations (features) for short texts or sentences.\n\n Parameters\n ----------\n inputs: D3M dataframe\n\n Returns\n -------\n Outputs: Input D3M dataframe with vector components appended as additional columns\n \"\"\"\n\n # figure out columns to operate on\n cols = self._get_operating_columns(\n inputs, self.hyperparams[\"use_columns\"], (\"http://schema.org/Text\",)\n )\n frame = inputs.iloc[:, cols]\n outputs = inputs.copy()\n\n try:\n # lazy load the model and keep it around for subsequent produce calls\n if Sent2VecPrimitive._vectorizer is None:\n Sent2VecPrimitive._vectorizer = 
_Sent2Vec(\n path=self.volumes[\"sent2vec_model\"]\n )\n\n output_vectors = []\n for col in range(frame.shape[1]):\n text = frame.iloc[:, col].tolist()\n embedded_sentences = Sent2VecPrimitive._vectorizer.embed_sentences(\n sentences=text\n )\n output_vectors.append(embedded_sentences)\n embedded_df = pd.DataFrame(\n np.array(output_vectors).reshape(len(embedded_sentences), -1)\n )\n except ValueError:\n # just return inputs with file names deleted if vectorizing fails\n return CallResult(outputs)\n\n # create df with vectorized columns and append to input df\n embedded_df = d3m_DataFrame(embedded_df)\n for col in range(embedded_df.shape[1]):\n col_dict = dict(\n embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, col))\n )\n col_dict[\"structural_type\"] = type(1.0)\n col_dict[\"name\"] = \"vector_\" + str(col)\n col_dict[\"semantic_types\"] = (\n \"http://schema.org/Float\",\n \"https://metadata.datadrivendiscovery.org/types/Attribute\",\n )\n embedded_df.metadata = embedded_df.metadata.update(\n (metadata_base.ALL_ELEMENTS, col), col_dict\n )\n df_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))\n df_dict_1 = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS,)))\n df_dict[\"dimension\"] = df_dict_1\n df_dict_1[\"name\"] = \"columns\"\n df_dict_1[\"semantic_types\"] = (\n \"https://metadata.datadrivendiscovery.org/types/TabularColumn\",\n )\n df_dict_1[\"length\"] = embedded_df.shape[1]\n embedded_df.metadata = embedded_df.metadata.update(\n (metadata_base.ALL_ELEMENTS,), df_dict\n )\n return CallResult(outputs.append_columns(embedded_df))\n\n @classmethod\n def _get_operating_columns(\n cls,\n inputs: container.DataFrame,\n use_columns: Sequence[int],\n semantic_types: Sequence[str],\n require_attribute: bool = True,\n ) -> Sequence[int]:\n # use caller supplied columns if supplied\n cols = set(use_columns)\n type_cols = set(\n inputs.metadata.list_columns_with_semantic_types(semantic_types)\n )\n if require_attribute:\n attributes = set(\n inputs.metadata.list_columns_with_semantic_types(\n (\"https://metadata.datadrivendiscovery.org/types/Attribute\",)\n )\n )\n type_cols = type_cols & attributes\n\n if len(cols) > 0:\n cols = type_cols & cols\n else:\n cols = type_cols\n return list(cols)" ]
[ [ "numpy.array" ] ]
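In this record the single extracted call, numpy.array, is the one that stacks the per-column Sent2Vec embeddings and reshapes them into a flat table before they are converted to a DataFrame. A toy version of that step for the common single-text-column case, with fabricated dimensions (3 sentences, 4-dimensional vectors) — a sketch only, not code from the primitive:

    import numpy as np
    import pandas as pd

    n_sentences, dim = 3, 4
    # Stand-in for Sent2Vec output on one text column: one vector per sentence.
    embedded_sentences = np.random.rand(n_sentences, dim)
    output_vectors = [embedded_sentences]  # the primitive appends one block per text column

    # Mirrors np.array(output_vectors).reshape(len(embedded_sentences), -1):
    # for a single column this yields a (3, 4) table, one row per sentence.
    flat = np.array(output_vectors).reshape(len(embedded_sentences), -1)
    embedded_df = pd.DataFrame(flat)
    print(embedded_df.shape)  # (3, 4)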
YannickWehr/trax
[ "67dda3b236339a7f6de803a3f84a9e92d0f0442c" ]
[ "trax/rl/actor_critic.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Classes for RL training in Trax.\"\"\"\n\nimport functools\nimport os\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom trax import data\nfrom trax import fastmath\nfrom trax import layers as tl\nfrom trax import shapes\nfrom trax import supervised\nfrom trax.fastmath import numpy as jnp\nfrom trax.rl import advantages as rl_advantages\nfrom trax.rl import training as rl_training\nfrom trax.supervised import lr_schedules as lr\n\n\nclass ActorCriticAgent(rl_training.PolicyAgent):\n \"\"\"Trains policy and value models using actor-critic methods.\n\n Attrs:\n on_policy (bool): Whether the algorithm is on-policy. Used in the data\n generators. Should be set in derived classes.\n \"\"\"\n\n on_policy = None\n\n def __init__(self, task,\n value_model=None,\n value_optimizer=None,\n value_lr_schedule=lr.multifactor,\n value_batch_size=64,\n value_train_steps_per_epoch=500,\n value_evals_per_epoch=1,\n value_eval_steps=1,\n n_shared_layers=0,\n added_policy_slice_length=0,\n n_replay_epochs=1,\n scale_value_targets=False,\n q_value=False,\n q_value_aggregate_max=True,\n q_value_n_samples=1,\n **kwargs): # Arguments of PolicyAgent come here.\n \"\"\"Configures the actor-critic trainer.\n\n Args:\n task: `RLTask` instance to use.\n value_model: Model to use for the value function.\n value_optimizer: Optimizer to train the value model.\n value_lr_schedule: lr schedule for value model training.\n value_batch_size: Batch size for value model training.\n value_train_steps_per_epoch: Number of steps are we using to train the\n value model in each epoch.\n value_evals_per_epoch: Number of value trainer evaluations per RL epoch;\n only affects metric reporting.\n value_eval_steps: Number of value trainer steps per evaluation; only\n affects metric reporting.\n n_shared_layers: Number of layers to share between value and policy\n models.\n added_policy_slice_length: How much longer should slices of\n trajectories be for policy than for value training; this\n is useful for TD calculations and only affect the length\n of elements produced for policy batches; value batches\n have maximum length set by `max_slice_length` in `**kwargs`.\n n_replay_epochs: Number of last epochs to take into the replay buffer;\n only makes sense for off-policy algorithms.\n scale_value_targets: If `True`, scale value function targets by\n `1 / (1 - gamma)`.\n q_value: If `True`, use Q-values as baselines.\n q_value_aggregate_max: If `True`, aggregate Q-values with max (or mean).\n q_value_n_samples: Number of samples to average over when calculating\n baselines based on Q-values.\n **kwargs: Arguments for `PolicyAgent` superclass.\n \"\"\"\n self._n_shared_layers = n_shared_layers\n self._value_batch_size = value_batch_size\n self._value_train_steps_per_epoch = value_train_steps_per_epoch\n self._value_evals_per_epoch = value_evals_per_epoch\n self._value_eval_steps = 
value_eval_steps\n\n # The 2 below will be initalized in super.__init__ anyway, but are needed\n # to construct value batches which are needed before PolicyAgent init\n # since policy input creation calls the value model -- hence this code.\n self._task = task\n self._max_slice_length = kwargs.get('max_slice_length', 1)\n self._added_policy_slice_length = added_policy_slice_length\n self._n_replay_epochs = n_replay_epochs\n task.set_n_replay_epochs(n_replay_epochs)\n\n if scale_value_targets:\n self._value_network_scale = 1 / (1 - self._task.gamma)\n else:\n self._value_network_scale = 1\n\n self._q_value = q_value\n self._q_value_aggregate_max = q_value_aggregate_max\n self._q_value_n_samples = q_value_n_samples\n\n is_discrete = isinstance(self._task.action_space, gym.spaces.Discrete)\n self._is_discrete = is_discrete\n self._vocab_size = None\n self._sample_all_discrete_actions = False\n if q_value and is_discrete:\n self._vocab_size = self.task.action_space.n\n # TODO(lukaszkaiser): the code below is specific to AWR, move it.\n # If n_samples = n_actions, we'll take them all in actor and reweight.\n if self._q_value_n_samples == self._vocab_size:\n # TODO(lukaszkaiser): set this explicitly once it's in AWR Trainer.\n self._sample_all_discrete_actions = True\n\n if q_value:\n value_model = functools.partial(value_model,\n inject_actions=True,\n is_discrete=is_discrete,\n vocab_size=self._vocab_size)\n self._value_eval_model = value_model(mode='eval')\n self._value_eval_model.init(self._value_model_signature)\n self._value_eval_jit = tl.jit_forward(\n self._value_eval_model.pure_fn, fastmath.device_count(), do_mean=False)\n\n # Initialize policy training.\n super().__init__(task, **kwargs)\n\n # Initialize training of the value function.\n value_output_dir = kwargs.get('output_dir', None)\n if value_output_dir is not None:\n value_output_dir = os.path.join(value_output_dir, 'value')\n # If needed, create value_output_dir and missing parent directories.\n if not tf.io.gfile.isdir(value_output_dir):\n tf.io.gfile.makedirs(value_output_dir)\n self._value_inputs = data.inputs.Inputs(\n train_stream=lambda _: self.value_batches_stream())\n self._value_trainer = supervised.Trainer(\n model=value_model,\n optimizer=value_optimizer,\n lr_schedule=value_lr_schedule(),\n loss_fn=tl.L2Loss(),\n inputs=self._value_inputs,\n output_dir=value_output_dir,\n metrics={'value_loss': tl.L2Loss()})\n\n @property\n def _value_model_signature(self):\n obs_sig = shapes.signature(self._task.observation_space)\n target_sig = mask_sig = shapes.ShapeDtype(\n shape=(1, 1, 1),\n )\n inputs_sig = (obs_sig.replace(shape=(1, 1) + obs_sig.shape),)\n if self._q_value:\n act_sig = shapes.signature(self._task.action_space)\n inputs_sig += (act_sig.replace(shape=(1, 1) + act_sig.shape),)\n return (*inputs_sig, target_sig, mask_sig)\n\n @property\n def _replay_epochs(self):\n if self.on_policy:\n assert self._n_replay_epochs == 1, (\n 'Non-unit replay buffer size only makes sense for off-policy '\n 'algorithms.'\n )\n return [-(ep + 1) for ep in range(self._n_replay_epochs)]\n\n def _run_value_model(self, observations, dist_inputs):\n if dist_inputs is None:\n dist_inputs = jnp.zeros(\n observations.shape[:2] + (self._policy_dist.n_inputs,)\n )\n\n actions = None\n if self._q_value:\n if self._sample_all_discrete_actions:\n # Since we want to sample all actions, start by creating their list.\n act = np.arange(self._vocab_size)\n # Now act is a vector [0, ..., vocab_size-1], but we'll need to tile it.\n # Add extra 
dimenstions so it's the same dimensionality as dist_inputs.\n act = jnp.reshape(act, [-1] + [1] * (len(dist_inputs.shape) - 1))\n # Now act is [vocab_size, 1, ..., 1], dimensionality of dist_inputs.\n dist_inputs = jnp.broadcast_to(\n dist_inputs, (self._q_value_n_samples,) + dist_inputs.shape)\n if self._sample_all_discrete_actions:\n actions = act + jnp.zeros(dist_inputs.shape[:-1], dtype=jnp.int32)\n actions = jnp.swapaxes(actions, 0, 1)\n # Swapping the n_samples and batch_size axes, so the input is split\n # between accelerators along the batch_size axis.\n dist_inputs = jnp.swapaxes(dist_inputs, 0, 1)\n if not self._sample_all_discrete_actions:\n actions = self._policy_dist.sample(dist_inputs)\n log_probs = self._policy_dist.log_prob(dist_inputs, actions)\n obs = observations\n obs = jnp.reshape(obs, [obs.shape[0], 1] + list(obs.shape[1:]))\n inputs = (obs, actions)\n else:\n log_probs = None\n inputs = (observations,)\n\n n_devices = fastmath.device_count()\n weights = tl.for_n_devices(self._value_eval_model.weights, n_devices)\n state = tl.for_n_devices(self._value_eval_model.state, n_devices)\n rng = self._value_eval_model.rng\n values, _ = self._value_eval_jit(inputs, weights, state, rng)\n values *= self._value_network_scale\n values = jnp.squeeze(values, axis=-1) # Remove the singleton depth dim.\n return (values, actions, log_probs)\n\n def _aggregate_values(self, values, aggregate_max, act_log_probs):\n if self._q_value:\n if aggregate_max:\n values = jnp.max(values, axis=1)\n elif self._sample_all_discrete_actions:\n values = jnp.sum(values * jnp.exp(act_log_probs), axis=1)\n else:\n values = jnp.mean(values, axis=1)\n return np.array(values) # Move the values to CPU.\n\n def value_batches_stream(self):\n \"\"\"Use the RLTask self._task to create inputs to the value model.\"\"\"\n max_slice_length = self._max_slice_length + self._added_policy_slice_length\n for np_trajectory in self._task.trajectory_batch_stream(\n self._value_batch_size,\n max_slice_length=max_slice_length,\n min_slice_length=(1 + self._added_policy_slice_length),\n margin=self._added_policy_slice_length,\n epochs=self._replay_epochs,\n ):\n (values, _, act_log_probs) = self._run_value_model(\n np_trajectory.observations, np_trajectory.dist_inputs\n )\n values = self._aggregate_values(\n values, self._q_value_aggregate_max, act_log_probs)\n\n # TODO(pkozakowski): Add some shape assertions and docs.\n # Calculate targets based on the advantages over the target network - this\n # allows TD learning for value networks.\n advantages = self._advantage_estimator(\n rewards=np_trajectory.rewards,\n returns=np_trajectory.returns,\n values=values,\n dones=np_trajectory.dones,\n gamma=self._task.gamma,\n n_extra_steps=self._added_policy_slice_length,\n )\n length = advantages.shape[1]\n values = values[:, :length]\n target_returns = values + advantages\n\n inputs = (np_trajectory.observations[:, :length],)\n if self._q_value:\n inputs += (np_trajectory.actions[:, :length],)\n\n # Insert an extra depth dimension, so the target shape is consistent with\n # the network output shape.\n yield (\n # Inputs: observations and maybe actions.\n *inputs,\n # Targets: computed returns.\n target_returns[:, :, None] / self._value_network_scale,\n # Mask to zero-out padding.\n np_trajectory.mask[:, :length, None],\n )\n\n def policy_inputs(self, trajectory, values):\n \"\"\"Create inputs to policy model from a TrajectoryNp and values.\n\n Args:\n trajectory: a TrajectoryNp, the trajectory to create inputs from\n values: a numpy 
array: value function computed on trajectory\n\n Returns:\n a tuple of numpy arrays of the form (inputs, x1, x2, ...) that will be\n passed to the policy model; policy model will compute outputs from\n inputs and (outputs, x1, x2, ...) will be passed to self.policy_loss\n which should be overridden accordingly.\n \"\"\"\n return NotImplementedError\n\n def policy_batches_stream(self):\n \"\"\"Use the RLTask self._task to create inputs to the policy model.\"\"\"\n # Maximum slice length for policy is max_slice_len + the added policy len.\n max_slice_length = self._max_slice_length + self._added_policy_slice_length\n for np_trajectory in self._task.trajectory_batch_stream(\n self._policy_batch_size,\n epochs=self._replay_epochs,\n max_slice_length=max_slice_length,\n margin=self._added_policy_slice_length,\n include_final_state=False):\n (values, _, act_log_probs) = self._run_value_model(\n np_trajectory.observations, np_trajectory.dist_inputs)\n values = self._aggregate_values(values, False, act_log_probs)\n if len(values.shape) != 2:\n raise ValueError('Values are expected to have shape ' +\n '[batch_size, length], got: %s' % str(values.shape))\n if values.shape[0] != self._policy_batch_size:\n raise ValueError('Values first dimension should = policy batch size, ' +\n '%d != %d' %(values.shape[0], self._policy_batch_size))\n yield self.policy_inputs(np_trajectory, values)\n\n def train_epoch(self):\n \"\"\"Trains RL for one epoch.\"\"\"\n # Copy policy state accumulated during data collection to the trainer.\n self._policy_trainer.model_state = self._policy_collect_model.state\n\n # Copy policy weights and state to value trainer.\n if self._n_shared_layers > 0:\n _copy_model_weights_and_state(\n 0, self._n_shared_layers, self._policy_trainer, self._value_trainer\n )\n\n # Update the target value network.\n self._value_eval_model.weights = self._value_trainer.model_weights\n self._value_eval_model.state = self._value_trainer.model_state\n\n n_value_evals = rl_training.remaining_evals(\n self._value_trainer.step,\n self._epoch,\n self._value_train_steps_per_epoch,\n self._value_evals_per_epoch)\n for _ in range(n_value_evals):\n self._value_trainer.train_epoch(\n self._value_train_steps_per_epoch // self._value_evals_per_epoch,\n self._value_eval_steps,\n )\n\n # Copy value weights and state to policy trainer.\n if self._n_shared_layers > 0:\n _copy_model_weights_and_state(\n 0, self._n_shared_layers, self._value_trainer, self._policy_trainer\n )\n n_policy_evals = rl_training.remaining_evals(\n self._policy_trainer.step,\n self._epoch,\n self._policy_train_steps_per_epoch,\n self._policy_evals_per_epoch)\n # Check if there was a restart after value training finishes and policy not.\n stopped_after_value = (n_value_evals == 0 and\n n_policy_evals < self._policy_evals_per_epoch)\n should_copy_weights = self._n_shared_layers > 0 and not stopped_after_value\n if should_copy_weights:\n _copy_model_weights_and_state(\n 0, self._n_shared_layers, self._value_trainer, self._policy_trainer\n )\n\n # Update the target value network.\n self._value_eval_model.weights = self._value_trainer.model_weights\n self._value_eval_model.state = self._value_trainer.model_state\n\n for _ in range(n_policy_evals):\n self._policy_trainer.train_epoch(\n self._policy_train_steps_per_epoch // self._policy_evals_per_epoch,\n self._policy_eval_steps,\n )\n\n def close(self):\n self._value_trainer.close()\n super().close()\n\n\ndef _copy_model_weights_and_state( # pylint: disable=invalid-name\n start, end, from_trainer, 
to_trainer, copy_optimizer_slots=False\n):\n \"\"\"Copy model weights[start:end] from from_trainer to to_trainer.\"\"\"\n from_weights = from_trainer.model_weights\n to_weights = list(to_trainer.model_weights)\n shared_weights = from_weights[start:end]\n to_weights[start:end] = shared_weights\n to_trainer.model_weights = to_weights\n\n from_state = from_trainer.model_state\n to_state = list(to_trainer.model_state)\n shared_state = from_state[start:end]\n to_state[start:end] = shared_state\n to_trainer.model_state = to_state\n\n if copy_optimizer_slots:\n # TODO(lukaszkaiser): make a nicer API in Trainer to support this.\n # Currently we use the hack below. Note [0] since that's the model w/o loss.\n # pylint: disable=protected-access\n from_slots = from_trainer._opt_state.slots[0][start:end]\n to_slots = to_trainer._opt_state.slots[0]\n # The lines below do to_slots[start:end] = from_slots, but on tuples.\n new_slots = to_slots[:start] + from_slots[start:end] + to_slots[end:]\n new_slots = tuple([new_slots] + list(to_trainer._opt_state.slots[1:]))\n to_trainer._opt_state = to_trainer._opt_state._replace(slots=new_slots)\n # pylint: enable=protected-access\n\n\n### Implementations of common actor-critic algorithms.\n\n\nclass AdvantageBasedActorCriticAgent(ActorCriticAgent):\n \"\"\"Base class for advantage-based actor-critic algorithms.\"\"\"\n\n def __init__(\n self,\n task,\n advantage_estimator=rl_advantages.td_lambda,\n advantage_normalization=True,\n advantage_normalization_epsilon=1e-5,\n **kwargs\n ):\n self._advantage_estimator = advantage_estimator\n self._advantage_normalization = advantage_normalization\n self._advantage_normalization_epsilon = advantage_normalization_epsilon\n super().__init__(task, **kwargs)\n\n def policy_inputs(self, trajectory, values):\n \"\"\"Create inputs to policy model from a TrajectoryNp and values.\"\"\"\n # How much TD to use is determined by the added policy slice length,\n # as the policy batches need to be this much longer to calculate TD.\n advantages = self._advantage_estimator(\n rewards=trajectory.rewards,\n returns=trajectory.returns,\n values=values,\n dones=trajectory.dones,\n gamma=self._task.gamma,\n n_extra_steps=self._added_policy_slice_length,\n )\n # Observations should be the same length as advantages - so if we are\n # using n_extra_steps, we need to trim the length to match.\n obs = trajectory.observations[:, :advantages.shape[1]]\n act = trajectory.actions[:, :advantages.shape[1]]\n mask = trajectory.mask[:, :advantages.shape[1]] # Mask to zero-out padding.\n if trajectory.dist_inputs is not None:\n dist_inputs = trajectory.dist_inputs[:, :advantages.shape[1]]\n else:\n dist_inputs = jnp.zeros(advantages.shape + (self._policy_dist.n_inputs,))\n # Shape checks to help debugging.\n if len(advantages.shape) != 2:\n raise ValueError('Advantages are expected to have shape ' +\n '[batch_size, length], got: %s' % str(advantages.shape))\n if act.shape[0:2] != advantages.shape:\n raise ValueError('First 2 dimensions of actions should be the same as in '\n 'advantages, %s != %s' % (act.shape[0:2],\n advantages.shape))\n if obs.shape[0:2] != advantages.shape:\n raise ValueError('First 2 dimensions of observations should be the same '\n 'as in advantages, %s != %s' % (obs.shape[0:2],\n advantages.shape))\n if dist_inputs.shape[:2] != advantages.shape:\n raise ValueError('First 2 dimensions of dist_inputs should be the same '\n 'as in advantages, %s != %s' % (dist_inputs.shape[:2],\n advantages.shape))\n if mask.shape != advantages.shape:\n 
raise ValueError('Mask and advantages shapes should be the same'\n ', %s != %s' % (mask.shape, advantages.shape))\n return (obs, act, advantages, dist_inputs, mask)\n\n @property\n def policy_loss_given_log_probs(self):\n \"\"\"Policy loss given action log-probabilities.\"\"\"\n raise NotImplementedError\n\n def _preprocess_advantages(self, advantages):\n if self._advantage_normalization:\n advantages = (\n (advantages - jnp.mean(advantages)) /\n (jnp.std(advantages) + self._advantage_normalization_epsilon)\n )\n return advantages\n\n @property\n def policy_loss(self, **unused_kwargs):\n \"\"\"Policy loss.\"\"\"\n def LossInput(dist_inputs, actions, advantages, old_dist_inputs): # pylint: disable=invalid-name\n \"\"\"Calculates action log probabilities and normalizes advantages.\"\"\"\n advantages = self._preprocess_advantages(advantages)\n log_probs = self._policy_dist.log_prob(dist_inputs, actions)\n old_log_probs = self._policy_dist.log_prob(old_dist_inputs, actions)\n return (log_probs, advantages, old_log_probs)\n\n return tl.Serial(\n tl.Fn('LossInput', LossInput, n_out=3),\n # Policy loss is expected to consume\n # (log_probs, advantages, old_log_probs, mask).\n self.policy_loss_given_log_probs,\n )\n\n @property\n def policy_metrics(self):\n metrics = super().policy_metrics\n metrics.update({\n 'advantage_mean': self.advantage_mean,\n 'advantage_std': self.advantage_std,\n })\n return metrics\n\n @property\n def advantage_mean(self):\n return tl.Serial([\n # (dist_inputs, advantages, old_dist_inputs, mask)\n tl.Select([1]), # Select just the advantages.\n tl.Fn('AdvantageMean', lambda x: jnp.mean(x)), # pylint: disable=unnecessary-lambda\n ])\n\n @property\n def advantage_std(self):\n return tl.Serial([\n # (dist_inputs, advantages, old_dist_inputs, mask)\n tl.Select([1]), # Select just the advantages.\n tl.Fn('AdvantageStd', lambda x: jnp.std(x)), # pylint: disable=unnecessary-lambda\n ])\n\n\nclass A2C(AdvantageBasedActorCriticAgent):\n \"\"\"Trains policy and value models using the A2C algortithm.\"\"\"\n\n on_policy = True\n\n def __init__(self, task, entropy_coeff=0.01, **kwargs):\n \"\"\"Configures the A2C Trainer.\"\"\"\n self._entropy_coeff = entropy_coeff\n super().__init__(task, **kwargs)\n\n @property\n def policy_loss_given_log_probs(self):\n \"\"\"Definition of the Advantage Actor Critic (A2C) loss.\"\"\"\n # A2C is one of the most basic actor-critic RL algorithms.\n # TODO(henrykm) re-factor f into rl_layers and finally share code between\n # actor_critic.py and actor_critic_joint.py - requires change of inputs\n # in actor_critic_joint.py from dist_inputs to log_probs.\n def f(log_probs, advantages, old_log_probs, mask):\n del old_log_probs # Not used in A2C.\n # log_probs of the shape float32[128,1]\n # advantages of the shape int32[128,1]\n # mask of the shape int32[128,1]\n if log_probs.shape != advantages.shape:\n raise ValueError('New log-probs and advantages shapes '\n 'should be the same, %s != %s' % (log_probs.shape,\n advantages.shape))\n if log_probs.shape != mask.shape:\n raise ValueError('New log-probs and mask shapes should be the same'\n ', %s != %s' % (log_probs.shape, mask.shape))\n\n a2c_objective = -jnp.sum(log_probs * advantages * mask) / jnp.sum(mask)\n\n entropy_vec = self._policy_dist.entropy(log_probs) * self._entropy_coeff\n entropy_loss = jnp.mean(entropy_vec)\n\n combined_loss = a2c_objective - entropy_loss\n\n return combined_loss\n\n return tl.Fn('A2CLoss', f)\n\n\nclass PPO(AdvantageBasedActorCriticAgent):\n \"\"\"The Proximal Policy 
Optimization Algorithm aka PPO.\n\n Trains policy and value models using the PPO algortithm.\n \"\"\"\n\n on_policy = True\n\n def __init__(self, task, epsilon=0.2, entropy_coeff=0.01, **kwargs):\n \"\"\"Configures the PPO Trainer.\"\"\"\n self._entropy_coeff = entropy_coeff\n self._epsilon = epsilon\n super().__init__(task, **kwargs)\n\n @property\n def policy_loss_given_log_probs(self):\n \"\"\"Definition of the Proximal Policy Optimization loss.\"\"\"\n def f(new_log_probs, advantages, old_log_probs, mask):\n # new_log_probs of the shape float32[128,1]\n # advantages of the shape int32[128,1]\n # old_log_probs of the shape int32[128,1]\n # mask of the shape int32[128,1]\n if new_log_probs.shape != advantages.shape:\n raise ValueError('New log-probs and advantages shapes '\n 'should be the same, %s != %s' % (new_log_probs.shape,\n advantages.shape))\n if new_log_probs.shape != old_log_probs.shape:\n raise ValueError('New log-probs and old log-probs shapes '\n 'should be the same, %s != %s' % (new_log_probs.shape,\n old_log_probs.shape))\n if new_log_probs.shape != mask.shape:\n raise ValueError('New log-probs and mask shapes should be the same'\n ', %s != %s' % (new_log_probs.shape, mask.shape))\n\n # The ratio between new_probs and old_probs expressed\n # using log_probs and exponentaion\n probs_ratio = jnp.exp(new_log_probs - old_log_probs)\n if advantages.shape != probs_ratio.shape:\n raise ValueError('New log-probs and old log probs shapes '\n 'should be the same, %s != %s' % (advantages.shape,\n probs_ratio.shape))\n unclipped_objective = probs_ratio * advantages\n clipped_objective = jnp.clip(probs_ratio,\n 1 - self._epsilon,\n 1 + self._epsilon) * advantages\n\n if unclipped_objective.shape != probs_ratio.shape:\n raise ValueError('unclipped_objective and clipped_objective shapes '\n 'should be the same, %s != %s' % (\n unclipped_objective.shape,\n clipped_objective.shape))\n\n ppo_objective = jnp.minimum(unclipped_objective, clipped_objective)\n\n if ppo_objective.shape != mask.shape:\n raise ValueError('ppo_objective and mask shapes '\n 'should be the same, %s != %s' % (\n ppo_objective.shape,\n mask.shape))\n\n ppo_loss = -jnp.sum(ppo_objective * mask) / jnp.sum(mask)\n entropy_vec = self._policy_dist.entropy(\n new_log_probs) * self._entropy_coeff\n entropy_loss = jnp.mean(entropy_vec)\n combined_loss = ppo_loss - entropy_loss\n\n return combined_loss\n return tl.Fn('PPOLoss', f)\n\n\n# AWR is an off-policy actor-critic RL algorithm.\ndef awr_weights(advantages, beta):\n return jnp.exp(advantages / beta)\n\n\n# Helper functions for computing AWR metrics.\ndef awr_metrics(beta, preprocess_layer=None):\n return { # pylint: disable=g-complex-comprehension\n 'awr_weight_' + name: awr_weight_stat(name, fn, beta, preprocess_layer)\n for (name, fn) in [\n ('mean', jnp.mean),\n ('std', jnp.std),\n ('min', jnp.min),\n ('max', jnp.max),\n ]\n }\n\n\ndef awr_weight_stat(stat_name, stat_fn, beta, preprocess_layer):\n # Select just the advantages if preprocess layer is not given.\n preprocess = tl.Select([1]) if preprocess_layer is None else preprocess_layer\n return tl.Serial([\n preprocess,\n tl.Fn(\n 'AWRWeight' + stat_name.capitalize(),\n lambda x: stat_fn(awr_weights(x, beta)),\n ),\n ])\n\n\ndef AWRLoss(beta, w_max): # pylint: disable=invalid-name\n \"\"\"Definition of the Advantage Weighted Regression (AWR) loss.\"\"\"\n def f(log_probs, advantages, old_log_probs, mask):\n del old_log_probs # Not used in AWR.\n weights = jnp.minimum(awr_weights(advantages, beta), w_max)\n return 
-jnp.sum(log_probs * weights * mask) / jnp.sum(mask)\n return tl.Fn('AWRLoss', f)\n\n\nclass AWR(AdvantageBasedActorCriticAgent):\n \"\"\"Trains policy and value models using AWR.\"\"\"\n\n on_policy = False\n\n def __init__(self, task, beta=1.0, w_max=20.0, **kwargs):\n \"\"\"Configures the AWR Trainer.\"\"\"\n self._beta = beta\n self._w_max = w_max\n super().__init__(task, **kwargs)\n\n @property\n def policy_loss_given_log_probs(self):\n \"\"\"Policy loss.\"\"\"\n return AWRLoss(beta=self._beta, w_max=self._w_max) # pylint: disable=no-value-for-parameter\n\n @property\n def policy_metrics(self):\n metrics = super().policy_metrics\n metrics.update(awr_metrics(self._beta))\n return metrics\n\n\ndef SamplingAWRLoss(beta, w_max, reweight=False, sampled_all_discrete=False): # pylint: disable=invalid-name\n \"\"\"Definition of the Advantage Weighted Regression (AWR) loss.\"\"\"\n def f(log_probs, advantages, old_log_probs, mask):\n if reweight: # Use new policy weights for sampled actions instead.\n mask *= jnp.exp(fastmath.stop_gradient(log_probs) - old_log_probs)\n if sampled_all_discrete: # Actions were sampled uniformly; weight them.\n mask *= jnp.exp(old_log_probs)\n weights = jnp.minimum(awr_weights(advantages, beta), w_max)\n return -jnp.sum(log_probs * weights * mask) / jnp.sum(mask)\n return tl.Fn('SamplingAWRLoss', f)\n\n\nclass SamplingAWR(AdvantageBasedActorCriticAgent):\n \"\"\"Trains policy and value models using Sampling AWR.\"\"\"\n\n on_policy = False\n\n def __init__(self, task, beta=1.0, w_max=20.0, reweight=False, **kwargs):\n \"\"\"Configures the AWR Trainer.\"\"\"\n self._beta = beta\n self._w_max = w_max\n self._reweight = reweight\n super().__init__(task, q_value=True, **kwargs)\n\n def _policy_inputs_to_advantages(self, preprocess):\n \"\"\"A layer that computes advantages from policy inputs.\"\"\"\n def fn(dist_inputs, actions, q_values, act_log_probs, mask):\n del dist_inputs, actions, mask\n q_values = jnp.swapaxes(q_values, 0, 1)\n act_log_probs = jnp.swapaxes(act_log_probs, 0, 1)\n if self._sample_all_discrete_actions:\n values = jnp.sum(q_values * jnp.exp(act_log_probs), axis=0)\n else:\n values = jnp.mean(q_values, axis=0)\n advantages = q_values - values # Broadcasting values over n_samples\n if preprocess:\n advantages = self._preprocess_advantages(advantages)\n return advantages\n return tl.Fn('PolicyInputsToAdvantages', fn)\n\n @property\n def policy_metrics(self):\n metrics = {\n 'policy_loss': self.policy_loss,\n 'advantage_mean': tl.Serial(\n self._policy_inputs_to_advantages(False),\n tl.Fn('Mean', lambda x: jnp.mean(x)) # pylint: disable=unnecessary-lambda\n ),\n 'advantage_std': tl.Serial(\n self._policy_inputs_to_advantages(False),\n tl.Fn('Std', lambda x: jnp.std(x)) # pylint: disable=unnecessary-lambda\n )\n }\n metrics.update(awr_metrics(\n self._beta, preprocess_layer=self._policy_inputs_to_advantages(True)))\n return metrics\n\n @property\n def policy_loss(self, **unused_kwargs):\n \"\"\"Policy loss.\"\"\"\n def LossInput(dist_inputs, actions, q_values, act_log_probs, mask): # pylint: disable=invalid-name\n \"\"\"Calculates action log probabilities and normalizes advantages.\"\"\"\n # (batch_size, n_samples, ...) 
-> (n_samples, batch_size, ...)\n      q_values = jnp.swapaxes(q_values, 0, 1)\n      mask = jnp.swapaxes(mask, 0, 1)\n      actions = jnp.swapaxes(actions, 0, 1)\n      act_log_probs = jnp.swapaxes(act_log_probs, 0, 1)\n\n      # TODO(pkozakowski,lukaszkaiser): Try max here, or reweighting?\n      if self._sample_all_discrete_actions:\n        values = jnp.sum(q_values * jnp.exp(act_log_probs), axis=0)\n      else:\n        values = jnp.mean(q_values, axis=0)\n      advantages = q_values - values  # Broadcasting values over n_samples\n      advantages = self._preprocess_advantages(advantages)\n\n      # Broadcast inputs and calculate log-probs\n      dist_inputs = jnp.broadcast_to(\n          dist_inputs, (self._q_value_n_samples,) + dist_inputs.shape)\n      log_probs = self._policy_dist.log_prob(dist_inputs, actions)\n      return (log_probs, advantages, act_log_probs, mask)\n\n    return tl.Serial(\n        tl.Fn('LossInput', LossInput, n_out=4),\n        # Policy loss is expected to consume\n        # (log_probs, advantages, old_log_probs, mask).\n        SamplingAWRLoss(\n            beta=self._beta, w_max=self._w_max, reweight=self._reweight,\n            sampled_all_discrete=self._sample_all_discrete_actions)\n    )\n\n  def policy_batches_stream(self):\n    \"\"\"Use the RLTask self._task to create inputs to the policy model.\"\"\"\n    # For now TD-0 estimation of the value. TODO(pkozakowski): Support others?\n    for np_trajectory in self._task.trajectory_batch_stream(\n        self._policy_batch_size,\n        epochs=self._replay_epochs,\n        max_slice_length=self._max_slice_length,\n        include_final_state=False,\n    ):\n      (q_values, actions, act_log_probs) = self._run_value_model(\n          np_trajectory.observations, np_trajectory.dist_inputs)\n      shapes.assert_same_shape(q_values, act_log_probs)\n\n      # q_values shape: (batch_size, n_samples, length)\n      if len(q_values.shape) != 3:\n        raise ValueError('Q-values are expected to have shape [batch_size, ' +\n                         'n_samples, length], got: %s' % str(q_values.shape))\n      if q_values.shape[1] != self._q_value_n_samples:\n        raise ValueError('Q-values dimension 1 should = n_samples, %d != %d'\n                         % (q_values.shape[1], self._q_value_n_samples))\n      if q_values.shape[0] != self._policy_batch_size:\n        raise ValueError('Q-values dimension 0 should = policy batch size, ' +\n                         '%d != %d' % (q_values.shape[0], self._policy_batch_size))\n\n      mask = np_trajectory.mask\n      mask = np.reshape(mask, [mask.shape[0], 1] + list(mask.shape[1:]))\n      mask = jnp.broadcast_to(mask, q_values.shape)\n      shapes.assert_same_shape(mask, q_values)\n      yield (np_trajectory.observations, actions, q_values, act_log_probs, mask)\n" ]
[ [ "numpy.arange", "tensorflow.io.gfile.isdir", "numpy.array", "tensorflow.io.gfile.makedirs" ] ]
brandongk-ubco/autoalbument
[ "1735ea4376694c2179ac62ce7d100a10b26f2558", "1735ea4376694c2179ac62ce7d100a10b26f2558" ]
[ "tests/test_albumentations_pytorch.py", "autoalbument/faster_autoaugment/models/faa_model.py" ]
[ "import albumentations.augmentations.functional as F\nimport pytest\nimport torch\nfrom torch.autograd import gradcheck\n\nimport autoalbument.albumentations_pytorch.functional as PF\nfrom tests.utils import assert_batches_match\n\n\nclass Base:\n def scalar_to_tensor(self, arg, requires_grad=False, dtype=torch.float32):\n if arg is None:\n return None\n return torch.tensor(arg, requires_grad=requires_grad, dtype=dtype)\n\n def test_albumentations_match(self, image_batches, arg):\n np_images, pytorch_batch = image_batches\n tensor_arg = self.scalar_to_tensor(arg)\n augmented_np_images = [self.albumentations_fn(image, arg) for image in np_images]\n augmented_pytorch_batch = self.albumentations_pytorch_fn(pytorch_batch, tensor_arg)\n assert_batches_match(augmented_np_images, augmented_pytorch_batch)\n\n def test_gradients(self, gradcheck_batch, arg):\n tensor_arg = self.scalar_to_tensor(arg, requires_grad=True, dtype=torch.float64)\n gradcheck(self.albumentations_pytorch_fn, (gradcheck_batch, tensor_arg))\n\n def albumentations_fn(self, image, arg):\n raise NotImplementedError\n\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n raise NotImplementedError\n\n\[email protected](\"arg\", [0.2, 0.4, 0.8])\nclass TestSolarize(Base):\n def albumentations_fn(self, image, arg):\n return F.solarize(image, threshold=arg)\n\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.solarize(pytorch_batch, threshold=arg)\n\n def test_gradients(self, gradcheck_batch, arg):\n pass\n\n\[email protected](\"arg\", [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [0.0, 0.7, -0.2]])\nclass TestShiftRgb(Base):\n def albumentations_fn(self, image, arg):\n return F.shift_rgb(image, r_shift=arg[0], g_shift=arg[1], b_shift=arg[2])\n\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.shift_rgb(pytorch_batch, r_shift=arg[0], g_shift=arg[1], b_shift=arg[2])\n\n\[email protected](\"arg\", [-1.0, 0.1, 0.5, 1.0])\nclass TestBrightnessAdjust(Base):\n def albumentations_fn(self, image, arg):\n return F.brightness_contrast_adjust(image, beta=arg, beta_by_max=True)\n\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.brightness_adjust(pytorch_batch, beta=arg)\n\n\[email protected](\"arg\", [-1.0, 0.1, 0.5, 1.0])\nclass TestContrastAdjust(Base):\n def albumentations_fn(self, image, arg):\n return F.brightness_contrast_adjust(image, alpha=arg)\n\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.contrast_adjust(pytorch_batch, alpha=arg)\n\n\[email protected](\"arg\", [None])\nclass TestVflip(Base):\n def albumentations_fn(self, image, arg):\n return F.vflip(image)\n\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.vflip(pytorch_batch)\n\n def test_gradients(self, gradcheck_batch, arg):\n pass\n\n\[email protected](\"arg\", [None])\nclass TestHflip(Base):\n def albumentations_fn(self, image, arg):\n return F.hflip(image)\n\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.hflip(pytorch_batch)\n\n def test_gradients(self, gradcheck_batch, arg):\n pass\n\n\[email protected](\n \"arg\",\n [\n [0.01],\n [-0.5],\n [0.5],\n [1.0 - 1e-6],\n [-1.0 + 1e-6],\n ],\n)\nclass TestShiftX(Base):\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.shift_x(pytorch_batch, dx=arg)\n\n def test_albumentations_match(self, image_batches, arg):\n pass\n\n\[email protected](\n \"arg\",\n [\n [0.01],\n [-0.5],\n [0.5],\n [1.0 - 1e-6],\n [-1.0 + 1e-6],\n ],\n)\nclass TestShiftY(Base):\n def 
albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.shift_y(pytorch_batch, dy=arg)\n\n def test_albumentations_match(self, image_batches, arg):\n pass\n\n\[email protected](\n \"arg\",\n [\n [0.1],\n [-0.5],\n [0.5],\n [1.0 - 1e-6],\n [-1.0 + 1e-6],\n ],\n)\nclass TestScale(Base):\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.scale(pytorch_batch, scale=arg)\n\n def test_albumentations_match(self, image_batches, arg):\n pass\n\n\[email protected](\n \"arg\",\n [\n [0.1],\n [-0.5],\n [0.5],\n [1.0 - 1e-6],\n [-1.0 + 1e-6],\n ],\n)\nclass TestRotate(Base):\n def albumentations_pytorch_fn(self, pytorch_batch, arg):\n return PF.rotate(pytorch_batch, angle=arg)\n\n def test_albumentations_match(self, image_batches, arg):\n pass\n", "\"\"\"\nBased on the official implementation of Faster AutoAugment\nhttps://github.com/moskomule/dda/blob/master/faster_autoaugment/search.py\n\"\"\"\n\nimport pytorch_lightning as pl\nimport torch\nfrom hydra.utils import instantiate\nfrom torch import Tensor\nfrom torch.nn import functional as F\n\nfrom autoalbument.faster_autoaugment.models.policy_model import Policy\n\n\nclass FAABaseModel(pl.LightningModule):\n def __init__(self, cfg):\n super().__init__()\n self.save_hyperparameters()\n self.automatic_optimization = False\n self.cfg = cfg\n self.main_model = self.create_main_model()\n self.policy_model = self.create_policy_model()\n\n def configure_optimizers(self):\n optimizer_config = self.cfg.optim\n main_optimizer = instantiate(optimizer_config.main, params=self.main_model.parameters())\n policy_optimizer = instantiate(optimizer_config.policy, params=self.policy_model.parameters())\n return main_optimizer, policy_optimizer\n\n def create_main_model(self):\n model_cfg = self.get_main_model_cfg()\n main_model = instantiate(model_cfg)\n return main_model\n\n def create_policy_model(self):\n policy_model_cfg = self.cfg.policy_model\n normalization_cfg = self.cfg.data.normalization\n policy_operations = [\n instantiate(operation, temperature=policy_model_cfg.temperature)\n for operation in policy_model_cfg.operations\n ]\n\n policy_model = Policy.faster_auto_augment_policy(\n policy_operations,\n policy_model_cfg.num_sub_policies,\n policy_model_cfg.temperature,\n policy_model_cfg.operation_count,\n policy_model_cfg.num_chunks,\n mean=torch.tensor(normalization_cfg.mean),\n std=torch.tensor(normalization_cfg.std),\n )\n return policy_model\n\n def gradient_penalty(self, real: Tensor, fake: Tensor) -> Tensor:\n alpha = real.new_empty(real.size(0), 1, 1, 1).uniform_(0, 1)\n interpolated = alpha * real + (1 - alpha) * fake\n interpolated.requires_grad_()\n _, output = self.main_model(interpolated)\n grad = torch.autograd.grad(\n outputs=output,\n inputs=interpolated,\n grad_outputs=torch.ones_like(output),\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n return (grad.norm(2, dim=1) - 1).pow(2).mean()\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n input, target = batch\n b = input.size(0) // 2\n a_input, a_target = input[:b], target[:b]\n n_input, n_target = input[b:], target[b:]\n\n main_optimizer, policy_optimizer = self.optimizers(use_pl_optimizer=True)\n\n ones = n_input.new_tensor(1.0)\n self.main_model.requires_grad_(True)\n self.main_model.zero_grad()\n output, n_output = self.main_model(n_input)\n loss = self.cfg.policy_model.task_factor * self.criterion(output, n_target)\n self.manual_backward(loss, main_optimizer, retain_graph=True)\n\n d_n_loss = n_output.mean()\n\n 
self.manual_backward(d_n_loss.unsqueeze(0), main_optimizer, -ones.unsqueeze(0))\n\n with torch.no_grad():\n a_input = self.policy_model.denormalize_(a_input)\n augmented = self.policy_model({\"image_batch\": a_input})[\"image_batch\"]\n\n _, a_output = self.main_model(augmented)\n d_a_loss = a_output.mean()\n\n self.manual_backward(d_a_loss.unsqueeze(0), main_optimizer, ones.unsqueeze(0))\n\n gp = self.cfg.policy_model.gp_factor * self.gradient_penalty(n_input, augmented)\n self.manual_backward(gp, main_optimizer)\n main_optimizer.step()\n\n self.main_model.requires_grad_(False)\n self.policy_model.zero_grad()\n augmented_input, maybe_augmented_target = self.policy_forward_for_policy_train(a_input, a_target)\n _output, a_output = self.main_model(augmented_input)\n\n _loss = self.cfg.policy_model.task_factor * self.criterion(_output, maybe_augmented_target)\n self.manual_backward(_loss, policy_optimizer, retain_graph=True)\n\n a_loss = a_output.mean()\n self.manual_backward(a_loss.unsqueeze(0), policy_optimizer, -ones.unsqueeze(0))\n\n policy_optimizer.step()\n with torch.no_grad():\n metrics = {\n \"loss\": loss + _loss,\n \"d_loss\": -d_n_loss + d_a_loss + gp,\n \"a_loss\": -a_loss,\n }\n self.log_dict(metrics, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n\n def get_main_model_cfg(self):\n raise NotImplementedError\n\n def criterion(self, input, target):\n raise NotImplementedError\n\n def policy_forward_for_policy_train(self, a_input, a_target):\n raise NotImplementedError\n\n\nclass FAAClassificationModel(FAABaseModel):\n def get_main_model_cfg(self):\n return self.cfg.classification_model\n\n def criterion(self, input, target):\n return F.cross_entropy(input, target)\n\n def policy_forward_for_policy_train(self, a_input, a_target):\n output = self.policy_model({\"image_batch\": a_input})[\"image_batch\"]\n return output, a_target\n\n\nclass FAASemanticSegmentationModel(FAABaseModel):\n def get_main_model_cfg(self):\n return self.cfg.semantic_segmentation_model\n\n def criterion(self, input, target):\n return F.binary_cross_entropy_with_logits(input, target)\n\n def policy_forward_for_policy_train(self, a_input, a_target):\n output = self.policy_model({\"image_batch\": a_input, \"mask_batch\": a_target})\n return output[\"image_batch\"], output[\"mask_batch\"]\n" ]
[ [ "torch.autograd.gradcheck", "torch.tensor" ], [ "torch.nn.functional.binary_cross_entropy_with_logits", "torch.nn.functional.cross_entropy", "torch.tensor", "torch.no_grad", "torch.ones_like" ] ]
2dx/moderngl
[ "5f932560a535469626d79d22e4205f400e18f328", "5f932560a535469626d79d22e4205f400e18f328" ]
[ "examples/basic_simple_color_triangle.py", "examples/heightmap_on_the_fly.py" ]
[ "'''\n    Renders a triangle that has all RGB combinations\n'''\n\nimport numpy as np\n\nfrom ported._example import Example\n\n\nclass SimpleColorTriangle(Example):\n    gl_version = (3, 3)\n    aspect_ratio = 16 / 9\n    title = \"Simple Color Triangle\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n        self.prog = self.ctx.program(\n            vertex_shader='''\n                #version 330\n\n                in vec2 in_vert;\n\n                in vec3 in_color;\n                out vec3 v_color;  // Goes to the fragment shader\n\n                void main() {\n                    gl_Position = vec4(in_vert, 0.0, 1.0);\n                    v_color = in_color;\n                }\n            ''',\n            fragment_shader='''\n                #version 330\n\n                in vec3 v_color;\n                out vec4 f_color;\n\n                void main() {\n                    // We're not interested in changing the alpha value\n                    f_color = vec4(v_color, 1.0);\n                }\n            ''',\n        )\n\n        # Each point's 2D coordinates are followed by its vec3 color values\n        vertices = np.array([\n            # x, y, red, green, blue\n            0.0, 0.8, 1.0, 0.0, 0.0,\n            -0.6, -0.8, 0.0, 1.0, 0.0,\n            0.6, -0.8, 0.0, 0.0, 1.0,\n        ], dtype='f4')\n\n        self.vbo = self.ctx.buffer(vertices)\n\n        # We control the 'in_vert' and 'in_color' variables\n        self.vao = self.ctx.vertex_array(\n            self.prog,\n            [\n                # Map in_vert to the first 2 floats\n                # Map in_color to the next 3 floats\n                (self.vbo, '2f 3f', 'in_vert', 'in_color')\n            ],\n        )\n\n    def render(self, time: float, frame_time: float):\n        self.ctx.clear(1.0, 1.0, 1.0)\n        self.vao.render()\n\n\nif __name__ == '__main__':\n    SimpleColorTriangle.run()\n", "\"\"\"\nDemonstrates rendering a terrain/height map on the fly without any\npre-generated geometry.\n\"\"\"\n\nimport numpy as np\nfrom pyrr import Matrix44, Matrix33\n\nimport moderngl\nfrom ported._example import Example\n\n\nclass HeightmapOnTheFly(Example):\n    title = \"Heightmap - On the fly\"\n    gl_version = (3, 3)\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n        self.prog = self.ctx.program(\n            vertex_shader=\"\"\"\n            #version 330\n\n            uniform int dim;\n            out vec2 uv;\n\n            void main() {\n                // grid position from gl_VertexID normalized\n                vec2 pos = vec2(gl_VertexID % dim, gl_VertexID / dim) / dim;\n                gl_Position = vec4(pos, 0.0, 1.0);\n            }\n            \"\"\",\n            geometry_shader=\"\"\"\n            #version 330\n\n            uniform sampler2D heightmap;\n\n            uniform mat4 projection;\n            uniform mat4 modelview;\n            uniform mat3 normal_matrix;\n            uniform int dim;\n            uniform float terrain_size;\n\n            out vec2 g_uv;\n            // out vec3 g_pos;\n            out vec3 normal;\n\n            layout(points) in;\n            layout(triangle_strip, max_vertices = 4) out;\n\n            const float scale = 0.5;\n            const float height = -0.15;\n\n            float calculateHeight(float h) {\n                return h * scale + height;\n            }\n\n            vec3 calculateNormal(vec2 uv, float step, float size) {\n                float hl = calculateHeight(texture(heightmap, uv + vec2(-step, 0.0)).r);\n                float hr = calculateHeight(texture(heightmap, uv + vec2(step, 0.0)).r);\n                float hu = calculateHeight(texture(heightmap, uv + vec2(0.0, step)).r);\n                float hd = calculateHeight(texture(heightmap, uv + vec2(0.0, -step)).r);\n                return normalize(vec3(hl - hr, hd - hu, size));\n            }\n\n            void main() {\n                // width and height of a quad\n                float size = terrain_size / dim;\n\n                // lower left corner of the quad\n                vec2 pos = gl_in[0].gl_Position.xy * terrain_size - terrain_size / 2.0;\n                vec2 uv = gl_in[0].gl_Position.xy;\n                float uv_step = 1.0 / dim;\n\n                // Calculate mvp\n                mat4 mvp = projection * modelview;\n\n                // Read heights for each corner\n                vec2 uv1 = uv + vec2(0.0, uv_step);\n                float h1 = calculateHeight(texture(heightmap, uv1).r);\n\n                vec2 uv2 = uv;\n                float h2 = calculateHeight(texture(heightmap, uv2).r);\n\n                vec2 uv3 = uv + vec2(uv_step, uv_step);\n                
float h3 = calculateHeight(texture(heightmap, uv3).r);\n\n vec2 uv4 = uv + vec2(uv_step, 0.0);\n float h4 = calculateHeight(texture(heightmap, uv4).r);\n\n // Upper left\n vec4 pos1 = vec4(pos + vec2(0.0, size), h1, 1.0);\n gl_Position = mvp * pos1;\n g_uv = uv1;\n normal = normal_matrix * calculateNormal(uv1, uv_step, size);\n // g_pos = (modelview * pos1).xyz;\n EmitVertex();\n\n // Lower left\n vec4 pos2 = vec4(pos, h2, 1.0);\n gl_Position = mvp * pos2;\n g_uv = uv2;\n normal = normal_matrix * calculateNormal(uv2, uv_step, size);\n // g_pos = (modelview * pos2).xyz;\n EmitVertex();\n\n // Upper right\n vec4 pos3 = vec4(pos + vec2(size, size), h3, 1.0);\n gl_Position = mvp * pos3;\n g_uv = uv3;\n normal = normal_matrix * calculateNormal(uv3, uv_step, size);\n // g_pos = (modelview * pos3).xyz;\n EmitVertex();\n\n // Lower right\n vec4 pos4 = vec4(pos + vec2(size, 0.0), h4, 1.0);\n gl_Position = mvp * pos4;\n g_uv = uv4;\n normal = normal_matrix * calculateNormal(uv4, uv_step, size);\n // g_pos = (modelview * pos4).xyz;\n EmitVertex();\n\n EndPrimitive();\n }\n \"\"\",\n fragment_shader=\"\"\"\n #version 330\n\n uniform sampler2D heightmap;\n out vec4 fragColor;\n in vec2 g_uv;\n // in vec3 g_pos;\n in vec3 normal;\n\n void main() {\n // vec3 normal = normalize(cross(dFdx(g_pos), dFdy(g_pos)));\n float l = abs(dot(vec3(0, 0, 1), normal));\n\n // fragColor = vec4(vec3(texture(heightmap, g_uv).r) * l, 1.0);\n // fragColor = vec4(normal * l, 1.0);\n fragColor = vec4(vec3(1.0) * l, 1.0);\n }\n \"\"\",\n )\n self.heightmap = self.load_texture_2d('heightmap_detailed.png')\n self.heightmap.repeat_x = False\n self.heightmap.repeat_y = False\n self.dim = self.heightmap.width\n self.vao = self.ctx.vertex_array(self.prog, [])\n\n projection = Matrix44.perspective_projection(45.0, self.aspect_ratio, 0.1, 1000.0, dtype='f4')\n self.prog['projection'].write(projection)\n self.prog['dim'] = self.dim - 1\n self.prog['terrain_size'] = 1.0\n\n def render(self, time, frame_time):\n self.ctx.clear()\n self.ctx.enable(moderngl.DEPTH_TEST | moderngl.CULL_FACE)\n angle = time * 0.2\n\n lookat = Matrix44.look_at(\n (np.cos(angle), np.sin(angle), 0.4),\n (0.0, 0.0, 0.0),\n (0.0, 0.0, 1.0),\n dtype='f4',\n )\n normal_matrix = Matrix33.from_matrix44(lookat).inverse.transpose()\n\n self.prog['modelview'].write(lookat)\n self.prog['normal_matrix'].write(normal_matrix.astype('f4').tobytes())\n self.heightmap.use(0)\n self.vao.render(moderngl.POINTS, vertices=(self.dim - 1) ** 2)\n\n\nif __name__ == '__main__':\n HeightmapOnTheFly.run()\n" ]
[ [ "numpy.array" ], [ "numpy.cos", "numpy.sin" ] ]
zegra1989/ml
[ "ed574ff45d4852d0c93f1ad5d7e0160cd752c9e0" ]
[ "src/native_bayes/classify.py" ]
[ "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n \"\"\" compute the accuracy of your Naive Bayes classifier \"\"\"\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n from sklearn.metrics import accuracy_score\n\n ### create classifier\n clf = GaussianNB()\n\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n\n ### use the trained classifier to predict labels for the test features\n pred = clf.predict(features_test)\n\n ### calculate and return the accuracy on the test data\n ### this is slightly different than the example, \n ### where we just print the accuracy\n ### you might need to import an sklearn module\n accuracy = accuracy_score(pred, labels_test)\n return accuracy" ]
[ [ "sklearn.naive_bayes.GaussianNB", "sklearn.metrics.accuracy_score" ] ]
coco-in-bluemoon/building-recommendation-engines
[ "b337b2ba75b6c9b08612ab1720a2858e64e9de09" ]
[ "chapter03/python/item_cf.py" ]
[ "import numpy as np\nimport pandas as pd\n\n\n# 1. load dataset\nratings = pd.read_csv('chapter02/data/movie_rating.csv')\n\nmovie_ratings = pd.pivot_table(\n    ratings,\n    values='rating',\n    index='title',\n    columns='critic'\n)\n\n\n# 2. calculate similarity\ndef calculate_norm(u):\n    norm_u = 0.0\n    for ui in u:\n        if np.isnan(ui):\n            continue\n        norm_u += (ui ** 2)\n\n    return np.sqrt(norm_u)\n\n\ndef calculate_cosine_similarity(u, v):\n    norm_u = calculate_norm(u)\n    norm_v = calculate_norm(v)\n    denominator = norm_u * norm_v\n\n    numerator = 0.0\n    for ui, vi in zip(u, v):\n        if np.isnan(ui) or np.isnan(vi):\n            continue\n        numerator += (ui * vi)\n\n    similarity = numerator / denominator\n\n    return similarity\n\n\ntitles = movie_ratings.index\nsim_items = pd.DataFrame(0, columns=titles, index=titles, dtype=float)\nfor src in titles:\n    for dst in titles:\n        src_vec = movie_ratings.loc[src, :].values\n        dst_vec = movie_ratings.loc[dst, :].values\n\n        similarity = calculate_cosine_similarity(src_vec, dst_vec)\n        sim_items.loc[src, dst] = similarity\nprint(sim_items)\n\n\n# 3. Make Prediction & Recommendation\nuser_id = 5\nratings_critic = movie_ratings.loc[:, [movie_ratings.columns[user_id]]]\nratings_critic.columns = ['rating']\ntitles_na_critic = ratings_critic[pd.isna(ratings_critic.rating)].index\n\nratings_t = ratings.loc[ratings.critic == movie_ratings.columns[user_id]]\nratings_t = ratings_t.reset_index(drop=True)\n\nx = sim_items.loc[:, titles_na_critic]\n\nratings_t = pd.merge(ratings_t, x, on='title')\nprint(ratings_t)\n\nresult_dict = {'title': list(), 'rating': list(), 'similarity': list()}\nfor row in ratings_t.iterrows():\n    for title in titles_na_critic:\n        result_dict['title'].append(title)\n        result_dict['rating'].append(row[1]['rating'])\n        result_dict['similarity'].append(row[1][title])\nresult = pd.DataFrame(result_dict)\nresult.loc[:, 'sim_rating'] = result.rating * result.similarity\nresult = result.groupby('title').sum()\nresult.loc[:, 'prediction'] = result.sim_rating / result.similarity\nresult = result.drop(columns=['rating', 'similarity', 'sim_rating'])\nprint(result)\n" ]
[ [ "pandas.merge", "pandas.read_csv", "numpy.sqrt", "numpy.isnan", "pandas.DataFrame", "pandas.isna", "pandas.pivot_table" ] ]
The-Makers-of-things/jesse
[ "df061ea21011a3c28f3359f421ec5594216fb708" ]
[ "jesse/indicators/rocp.py" ]
[ "from typing import Union\n\nimport numpy as np\nimport talib\n\nfrom jesse.helpers import get_candle_source\n\n\ndef rocp(candles: np.ndarray, period: int = 10, source_type: str = \"close\", sequential: bool = False) -> Union[\n float, np.ndarray]:\n \"\"\"\n ROCP - Rate of change Percentage: (price-prevPrice)/prevPrice\n\n :param candles: np.ndarray\n :param period: int - default=10\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n if not sequential and len(candles) > 240:\n candles = candles[-240:]\n\n source = get_candle_source(candles, source_type=source_type)\n res = talib.ROCP(source, timeperiod=period)\n\n if sequential:\n return res\n else:\n return None if np.isnan(res[-1]) else res[-1]\n" ]
[ [ "numpy.isnan" ] ]
tikhonovpavel/LdaSummarization
[ "fbfb229e83548d9dd8f921626fd3fbf423b0305a" ]
[ "src/models/data_loader.py" ]
[ "import bisect\nimport gc\nimport glob\nimport pickle\nimport random\n\nimport torch\n\nfrom others.logging import logger\n\nimport gensim\nfrom gensim.utils import simple_preprocess\nfrom gensim.parsing.preprocessing import STOPWORDS\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\nfrom nltk.stem.porter import *\nimport numpy as np\nnp.random.seed(2018)\n\n# import nltk\n# nltk.download('wordnet')\n#\n# with open('../topic_modelling_data/dictionary.pkl', 'rb') as f:\n# tm_dictionary = pickle.load(f)\n#\n# with open('../topic_modelling_data/lda_model.pkl', 'rb') as f:\n# lda_model = pickle.load(f)\n#\n# stemmer = SnowballStemmer('english')\n#\n# def lemmatize_stemming(text):\n# return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))\n#\n# def preprocess(text):\n# result = []\n# for token in gensim.utils.simple_preprocess(text):\n# if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:\n# result.append(lemmatize_stemming(token))\n# return result\n\nclass Batch(object):\n def _pad(self, data, pad_id, width=-1):\n if (width == -1):\n width = max(len(d) for d in data)\n rtn_data = [d + [pad_id] * (width - len(d)) for d in data]\n return rtn_data\n\n def __init__(self, data=None, device=None, is_test=False):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n pre_src = [x[0] for x in data]\n pre_tgt = [x[1] for x in data]\n pre_segs = [x[2] for x in data]\n pre_clss = [x[3] for x in data]\n pre_src_sent_labels = [x[4] for x in data]\n\n src = torch.tensor(self._pad(pre_src, 0))\n tgt = torch.tensor(self._pad(pre_tgt, 0))\n\n segs = torch.tensor(self._pad(pre_segs, 0))\n\n try:\n mask_src = 1 - (src == 0)\n mask_tgt = 1 - (tgt == 0)\n except RuntimeError as err:\n if 'Subtraction, the `-` operator, with a bool tensor is not supported' not in str(err):\n raise err\n mask_src = ~(src == 0)\n mask_tgt = ~(tgt == 0)\n\n clss = torch.tensor(self._pad(pre_clss, -1))\n src_sent_labels = torch.tensor(self._pad(pre_src_sent_labels, 0))\n\n try:\n mask_cls = 1 - (clss == -1)\n except RuntimeError as err:\n if 'Subtraction, the `-` operator, with a bool tensor is not supported' not in str(err):\n raise err\n mask_cls = ~(clss == -1)\n\n clss[clss == -1] = 0\n setattr(self, 'clss', clss.to(device))\n setattr(self, 'mask_cls', mask_cls.to(device))\n setattr(self, 'src_sent_labels', src_sent_labels.to(device))\n\n\n setattr(self, 'src', src.to(device))\n setattr(self, 'tgt', tgt.to(device))\n setattr(self, 'segs', segs.to(device))\n setattr(self, 'mask_src', mask_src.to(device))\n setattr(self, 'mask_tgt', mask_tgt.to(device))\n\n\n # setattr(self, 'topics', topics.to(device))\n\n\n if (is_test) or True:\n src_str = [x[-3] for x in data]\n setattr(self, 'src_str', src_str)\n tgt_str = [x[-2] for x in data]\n setattr(self, 'tgt_str', tgt_str)\n topics = [x[-1] for x in data]\n setattr(self, 'topics', topics)\n\n def __len__(self):\n return self.batch_size\n\n\n\n\ndef load_dataset(args, corpus_type, shuffle):\n \"\"\"\n Dataset generator. 
Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.\n\n Args:\n corpus_type: 'train' or 'valid'\n Returns:\n A list of dataset, the dataset(s) are lazily loaded.\n \"\"\"\n assert corpus_type in [\"train\", \"valid\", \"test\"]\n\n def _lazy_dataset_loader(pt_file, corpus_type, use_topic_modelling):\n dataset = torch.load(pt_file)\n\n # if use_topic_modelling:\n # for article in dataset:\n # # unseen_document = 'How a Pentagon deal became an identity crisis for Google'\n # bow_vector = tm_dictionary.doc2bow(preprocess(' '.join(article['src_txt'])))\n #\n # article_topic = sorted(lda_model[bow_vector], key=lambda tup: -1 * tup[1])[0]\n # article_topic = article_topic[0]\n # DICTIONARY_SIZE = 30_000\n # article_topic = DICTIONARY_SIZE + article_topic\n #\n # article['src'] = [article_topic] + article['src']\n #\n # # for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1 * tup[1]):\n # # print(\"Score: {}\\t Topic: {}\".format(score, lda_model.print_topic(index, 5)))\n\n logger.info('Loading %s dataset from %s, number of examples: %d' %\n (corpus_type, pt_file, len(dataset)))\n return dataset\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(args.bert_data_path + '.' + corpus_type + '.[0-9]*.pt'))\n if pts:\n if (shuffle):\n random.shuffle(pts)\n\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type, args.use_topic_modelling)\n else:\n # Only one inputters.*Dataset, simple!\n pt = args.bert_data_path + '.' + corpus_type + '.pt'\n yield _lazy_dataset_loader(pt, corpus_type, args.use_topic_modelling)\n\n\ndef abs_batch_size_fn(new, count):\n src, tgt = new[0], new[1]\n global max_n_sents, max_n_tokens, max_size\n if count == 1:\n max_size = 0\n max_n_sents=0\n max_n_tokens=0\n max_n_sents = max(max_n_sents, len(tgt))\n max_size = max(max_size, max_n_sents)\n src_elements = count * max_size\n if (count > 6):\n return src_elements + 1e3\n return src_elements\n\n\ndef ext_batch_size_fn(new, count):\n if (len(new) == 4):\n pass\n src, labels = new[0], new[4]\n global max_n_sents, max_n_tokens, max_size\n if count == 1:\n max_size = 0\n max_n_sents = 0\n max_n_tokens = 0\n max_n_sents = max(max_n_sents, len(src))\n max_size = max(max_size, max_n_sents)\n src_elements = count * max_size\n return src_elements\n\n\nclass Dataloader(object):\n def __init__(self, args, datasets, batch_size,\n device, shuffle, is_test):\n self.args = args\n self.datasets = datasets\n self.batch_size = batch_size\n self.device = device\n self.shuffle = shuffle\n self.is_test = is_test\n self.use_topic_modelling = args.use_topic_modelling\n self.cur_iter = self._next_dataset_iterator(datasets)\n assert self.cur_iter is not None\n\n def __iter__(self):\n dataset_iter = (d for d in self.datasets)\n while self.cur_iter is not None:\n for batch in self.cur_iter:\n yield batch\n self.cur_iter = self._next_dataset_iterator(dataset_iter)\n\n\n def _next_dataset_iterator(self, dataset_iter):\n try:\n # Drop the current dataset for decreasing memory\n if hasattr(self, \"cur_dataset\"):\n self.cur_dataset = None\n gc.collect()\n del self.cur_dataset\n gc.collect()\n\n self.cur_dataset = next(dataset_iter)\n except StopIteration:\n return None\n\n return DataIterator(args = self.args,\n dataset=self.cur_dataset, batch_size=self.batch_size,\n device=self.device, shuffle=self.shuffle, is_test=self.is_test)\n\n\nclass DataIterator(object):\n def __init__(self, args, dataset, batch_size, device=None, is_test=False,\n 
shuffle=True):\n self.args = args\n self.batch_size, self.is_test, self.dataset = batch_size, is_test, dataset\n self.iterations = 0\n self.device = device\n self.shuffle = shuffle\n\n self.sort_key = lambda x: len(x[1])\n\n self._iterations_this_epoch = 0\n if (self.args.task == 'abs'):\n self.batch_size_fn = abs_batch_size_fn\n else:\n self.batch_size_fn = ext_batch_size_fn\n\n def data(self):\n if self.shuffle:\n random.shuffle(self.dataset)\n xs = self.dataset\n return xs\n\n\n\n\n\n\n def preprocess(self, ex, is_test):\n src = ex['src']\n tgt = ex['tgt'][:self.args.max_tgt_len][:-1]+[2]\n src_sent_labels = ex['src_sent_labels']\n segs = ex['segs']\n if(not self.args.use_interval):\n segs=[0]*len(segs)\n clss = ex['clss']\n src_txt = ex['src_txt']\n tgt_txt = ex['tgt_txt']\n\n try:\n topics = ex['topics']\n except KeyError:\n print('Warning: topics are not presented!')\n topics = None\n\n end_id = [src[-1]]\n src = src[:-1][:self.args.max_pos - 1] + end_id\n segs = segs[:self.args.max_pos]\n max_sent_id = bisect.bisect_left(clss, self.args.max_pos)\n src_sent_labels = src_sent_labels[:max_sent_id]\n clss = clss[:max_sent_id]\n # src_txt = src_txt[:max_sent_id]\n\n\n\n if(is_test):\n return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics\n else:\n return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics\n\n def batch_buffer(self, data, batch_size):\n minibatch, size_so_far = [], 0\n for ex in data:\n if(len(ex['src'])==0):\n continue\n ex = self.preprocess(ex, self.is_test)\n if(ex is None):\n continue\n minibatch.append(ex)\n size_so_far = self.batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch\n\n def batch(self, data, batch_size):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = self.batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], self.batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch\n\n def create_batches(self):\n \"\"\" Create batches \"\"\"\n data = self.data()\n for buffer in self.batch_buffer(data, self.batch_size * 300):\n\n if (self.args.task == 'abs'):\n p_batch = sorted(buffer, key=lambda x: len(x[2]))\n p_batch = sorted(p_batch, key=lambda x: len(x[1]))\n else:\n p_batch = sorted(buffer, key=lambda x: len(x[2]))\n\n p_batch = self.batch(p_batch, self.batch_size)\n\n\n p_batch = list(p_batch)\n if (self.shuffle):\n random.shuffle(p_batch)\n for b in p_batch:\n if(len(b)==0):\n continue\n yield b\n\n def __iter__(self):\n while True:\n self.batches = self.create_batches()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n batch = Batch(minibatch, self.device, self.is_test)\n\n yield batch\n return\n\n\nclass TextDataloader(object):\n def __init__(self, args, datasets, batch_size,\n device, shuffle, is_test):\n self.args = args\n self.batch_size = batch_size\n self.device = device\n\n def data(self):\n if self.shuffle:\n random.shuffle(self.dataset)\n xs = self.dataset\n return xs\n\n def preprocess(self, ex, 
is_test):\n src = ex['src']\n tgt = ex['tgt'][:self.args.max_tgt_len][:-1] + [2]\n src_sent_labels = ex['src_sent_labels']\n segs = ex['segs']\n if (not self.args.use_interval):\n segs = [0] * len(segs)\n clss = ex['clss']\n src_txt = ex['src_txt']\n tgt_txt = ex['tgt_txt']\n topics = ex['topics']\n\n end_id = [src[-1]]\n src = src[:-1][:self.args.max_pos - 1] + end_id\n segs = segs[:self.args.max_pos]\n max_sent_id = bisect.bisect_left(clss, self.args.max_pos)\n src_sent_labels = src_sent_labels[:max_sent_id]\n clss = clss[:max_sent_id]\n # src_txt = src_txt[:max_sent_id]\n\n if (is_test):\n return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics\n else:\n return src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt, topics\n\n def batch_buffer(self, data, batch_size):\n minibatch, size_so_far = [], 0\n for ex in data:\n if (len(ex['src']) == 0):\n continue\n ex = self.preprocess(ex, self.is_test)\n if (ex is None):\n continue\n minibatch.append(ex)\n size_so_far = simple_batch_size_fn(ex, len(minibatch))\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], simple_batch_size_fn(ex, 1)\n if minibatch:\n yield minibatch\n\n def create_batches(self):\n \"\"\" Create batches \"\"\"\n data = self.data()\n for buffer in self.batch_buffer(data, self.batch_size * 300):\n if (self.args.task == 'abs'):\n p_batch = sorted(buffer, key=lambda x: len(x[2]))\n p_batch = sorted(p_batch, key=lambda x: len(x[1]))\n else:\n p_batch = sorted(buffer, key=lambda x: len(x[2]))\n p_batch = batch(p_batch, self.batch_size)\n\n p_batch = batch(p_batch, self.batch_size)\n\n p_batch = list(p_batch)\n if (self.shuffle):\n random.shuffle(p_batch)\n for b in p_batch:\n if (len(b) == 0):\n continue\n yield b\n\n def __iter__(self):\n while True:\n self.batches = self.create_batches()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n batch = Batch(minibatch, self.device, self.is_test)\n\n yield batch\n return\n" ]
[ [ "numpy.random.seed", "torch.load" ] ]
TeaKatz/Generative_Deep_Learning
[ "f62b9150a5e18240dd22816918f2ce6abf807d58" ]
[ "Original_Codes/GDL_code-master/models/WGANGP.py" ]
[ "\nfrom keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda, Activation, BatchNormalization, LeakyReLU, Dropout, ZeroPadding2D, UpSampling2D\nfrom keras.layers.merge import _Merge\n\nfrom keras.models import Model, Sequential\nfrom keras import backend as K\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.callbacks import ModelCheckpoint \nfrom keras.utils import plot_model\nfrom keras.initializers import RandomNormal\n\nfrom functools import partial\n\nimport numpy as np\nimport json\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\n\n\nclass RandomWeightedAverage(_Merge):\n def __init__(self, batch_size):\n super().__init__()\n self.batch_size = batch_size\n \"\"\"Provides a (random) weighted average between real and generated image samples\"\"\"\n def _merge_function(self, inputs):\n alpha = K.random_uniform((self.batch_size, 1, 1, 1))\n return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])\n\nclass WGANGP():\n def __init__(self\n , input_dim\n , critic_conv_filters\n , critic_conv_kernel_size\n , critic_conv_strides\n , critic_batch_norm_momentum\n , critic_activation\n , critic_dropout_rate\n , critic_learning_rate\n , generator_initial_dense_layer_size\n , generator_upsample\n , generator_conv_filters\n , generator_conv_kernel_size\n , generator_conv_strides\n , generator_batch_norm_momentum\n , generator_activation\n , generator_dropout_rate\n , generator_learning_rate\n , optimiser\n , grad_weight\n , z_dim\n , batch_size\n ):\n\n self.name = 'gan'\n\n self.input_dim = input_dim\n self.critic_conv_filters = critic_conv_filters\n self.critic_conv_kernel_size = critic_conv_kernel_size\n self.critic_conv_strides = critic_conv_strides\n self.critic_batch_norm_momentum = critic_batch_norm_momentum\n self.critic_activation = critic_activation\n self.critic_dropout_rate = critic_dropout_rate\n self.critic_learning_rate = critic_learning_rate\n\n self.generator_initial_dense_layer_size = generator_initial_dense_layer_size\n self.generator_upsample = generator_upsample\n self.generator_conv_filters = generator_conv_filters\n self.generator_conv_kernel_size = generator_conv_kernel_size\n self.generator_conv_strides = generator_conv_strides\n self.generator_batch_norm_momentum = generator_batch_norm_momentum\n self.generator_activation = generator_activation\n self.generator_dropout_rate = generator_dropout_rate\n self.generator_learning_rate = generator_learning_rate\n \n self.optimiser = optimiser\n\n self.z_dim = z_dim\n\n self.n_layers_critic = len(critic_conv_filters)\n self.n_layers_generator = len(generator_conv_filters)\n\n self.weight_init = RandomNormal(mean=0., stddev=0.02) # 'he_normal' #RandomNormal(mean=0., stddev=0.02)\n self.grad_weight = grad_weight\n self.batch_size = batch_size\n\n\n self.d_losses = []\n self.g_losses = []\n self.epoch = 0\n\n self._build_critic()\n self._build_generator()\n\n self._build_adversarial()\n\n def gradient_penalty_loss(self, y_true, y_pred, interpolated_samples):\n \"\"\"\n Computes gradient penalty based on prediction and weighted real / fake samples\n \"\"\"\n gradients = K.gradients(y_pred, interpolated_samples)[0]\n\n # compute the euclidean norm by squaring ...\n gradients_sqr = K.square(gradients)\n # ... summing over the rows ...\n gradients_sqr_sum = K.sum(gradients_sqr,\n axis=np.arange(1, len(gradients_sqr.shape)))\n # ... 
and sqrt\n gradient_l2_norm = K.sqrt(gradients_sqr_sum)\n # compute lambda * (1 - ||grad||)^2 still for each single sample\n gradient_penalty = K.square(1 - gradient_l2_norm)\n # return the mean as loss over all the batch samples\n return K.mean(gradient_penalty)\n\n def wasserstein(self, y_true, y_pred):\n return -K.mean(y_true * y_pred)\n\n def get_activation(self, activation):\n if activation == 'leaky_relu':\n layer = LeakyReLU(alpha = 0.2)\n else:\n layer = Activation(activation)\n return layer\n\n def _build_critic(self):\n\n ### THE critic\n critic_input = Input(shape=self.input_dim, name='critic_input')\n\n x = critic_input\n\n for i in range(self.n_layers_critic):\n\n x = Conv2D(\n filters = self.critic_conv_filters[i]\n , kernel_size = self.critic_conv_kernel_size[i]\n , strides = self.critic_conv_strides[i]\n , padding = 'same'\n , name = 'critic_conv_' + str(i)\n , kernel_initializer = self.weight_init\n )(x)\n\n if self.critic_batch_norm_momentum and i > 0:\n x = BatchNormalization(momentum = self.critic_batch_norm_momentum)(x)\n\n x = self.get_activation(self.critic_activation)(x)\n\n if self.critic_dropout_rate:\n x = Dropout(rate = self.critic_dropout_rate)(x)\n\n x = Flatten()(x)\n\n # x = Dense(512, kernel_initializer = self.weight_init)(x)\n\n # x = self.get_activation(self.critic_activation)(x)\n \n critic_output = Dense(1, activation=None\n , kernel_initializer = self.weight_init\n )(x)\n\n self.critic = Model(critic_input, critic_output)\n\n def _build_generator(self):\n\n ### THE generator\n\n generator_input = Input(shape=(self.z_dim,), name='generator_input')\n\n x = generator_input\n\n x = Dense(np.prod(self.generator_initial_dense_layer_size), kernel_initializer = self.weight_init)(x)\n if self.generator_batch_norm_momentum:\n x = BatchNormalization(momentum = self.generator_batch_norm_momentum)(x)\n \n x = self.get_activation(self.generator_activation)(x)\n\n x = Reshape(self.generator_initial_dense_layer_size)(x)\n\n if self.generator_dropout_rate:\n x = Dropout(rate = self.generator_dropout_rate)(x)\n\n for i in range(self.n_layers_generator):\n\n if self.generator_upsample[i] == 2:\n x = UpSampling2D()(x)\n x = Conv2D(\n filters = self.generator_conv_filters[i]\n , kernel_size = self.generator_conv_kernel_size[i]\n , padding = 'same'\n , name = 'generator_conv_' + str(i)\n , kernel_initializer = self.weight_init\n )(x)\n else:\n\n x = Conv2DTranspose(\n filters = self.generator_conv_filters[i]\n , kernel_size = self.generator_conv_kernel_size[i]\n , padding = 'same'\n , strides = self.generator_conv_strides[i]\n , name = 'generator_conv_' + str(i)\n , kernel_initializer = self.weight_init\n )(x)\n\n if i < self.n_layers_generator - 1:\n\n if self.generator_batch_norm_momentum:\n x = BatchNormalization(momentum = self.generator_batch_norm_momentum)(x)\n\n x = self.get_activation(self.generator_activation)(x)\n \n else:\n x = Activation('tanh')(x)\n\n generator_output = x\n self.generator = Model(generator_input, generator_output)\n\n\n\n\n def get_opti(self, lr):\n if self.optimiser == 'adam':\n opti = Adam(lr=lr, beta_1=0.5)\n elif self.optimiser == 'rmsprop':\n opti = RMSprop(lr=lr)\n else:\n opti = Adam(lr=lr)\n\n return opti\n\n\n def set_trainable(self, m, val):\n m.trainable = val\n for l in m.layers:\n l.trainable = val\n\n def _build_adversarial(self):\n \n #-------------------------------\n # Construct Computational Graph\n # for the Critic\n #-------------------------------\n\n # Freeze generator's layers while training critic\n 
self.set_trainable(self.generator, False)\n\n # Image input (real sample)\n real_img = Input(shape=self.input_dim)\n\n # Fake image\n z_disc = Input(shape=(self.z_dim,))\n fake_img = self.generator(z_disc)\n\n # critic determines validity of the real and fake images\n fake = self.critic(fake_img)\n valid = self.critic(real_img)\n\n # Construct weighted average between real and fake images\n interpolated_img = RandomWeightedAverage(self.batch_size)([real_img, fake_img])\n # Determine validity of weighted sample\n validity_interpolated = self.critic(interpolated_img)\n\n # Use Python partial to provide loss function with additional\n # 'interpolated_samples' argument\n partial_gp_loss = partial(self.gradient_penalty_loss,\n interpolated_samples=interpolated_img)\n partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names\n\n self.critic_model = Model(inputs=[real_img, z_disc],\n outputs=[valid, fake, validity_interpolated])\n\n self.critic_model.compile(\n loss=[self.wasserstein,self.wasserstein, partial_gp_loss]\n ,optimizer=self.get_opti(self.critic_learning_rate)\n ,loss_weights=[1, 1, self.grad_weight]\n )\n \n #-------------------------------\n # Construct Computational Graph\n # for Generator\n #-------------------------------\n\n # For the generator we freeze the critic's layers\n self.set_trainable(self.critic, False)\n self.set_trainable(self.generator, True)\n\n # Sampled noise for input to generator\n model_input = Input(shape=(self.z_dim,))\n # Generate images based of noise\n img = self.generator(model_input)\n # Discriminator determines validity\n model_output = self.critic(img)\n # Defines generator model\n self.model = Model(model_input, model_output)\n\n self.model.compile(optimizer=self.get_opti(self.generator_learning_rate)\n , loss=self.wasserstein\n )\n\n self.set_trainable(self.critic, True)\n\n def train_critic(self, x_train, batch_size, using_generator):\n\n valid = np.ones((batch_size,1), dtype=np.float32)\n fake = -np.ones((batch_size,1), dtype=np.float32)\n dummy = np.zeros((batch_size, 1), dtype=np.float32) # Dummy gt for gradient penalty\n\n if using_generator:\n true_imgs = next(x_train)[0]\n if true_imgs.shape[0] != batch_size:\n true_imgs = next(x_train)[0]\n else:\n idx = np.random.randint(0, x_train.shape[0], batch_size)\n true_imgs = x_train[idx]\n \n noise = np.random.normal(0, 1, (batch_size, self.z_dim))\n\n d_loss = self.critic_model.train_on_batch([true_imgs, noise], [valid, fake, dummy])\n return d_loss\n\n def train_generator(self, batch_size):\n valid = np.ones((batch_size,1), dtype=np.float32)\n noise = np.random.normal(0, 1, (batch_size, self.z_dim))\n return self.model.train_on_batch(noise, valid)\n\n\n def train(self, x_train, batch_size, epochs, run_folder, print_every_n_batches = 10\n , n_critic = 5\n , using_generator = False):\n\n for epoch in range(self.epoch, self.epoch + epochs):\n\n if epoch % 100 == 0:\n critic_loops = 5\n else:\n critic_loops = n_critic\n\n for _ in range(critic_loops):\n d_loss = self.train_critic(x_train, batch_size, using_generator)\n\n g_loss = self.train_generator(batch_size)\n\n \n print (\"%d (%d, %d) [D loss: (%.1f)(R %.1f, F %.1f, G %.1f)] [G loss: %.1f]\" % (epoch, critic_loops, 1, d_loss[0], d_loss[1],d_loss[2],d_loss[3],g_loss))\n \n\n\n self.d_losses.append(d_loss)\n self.g_losses.append(g_loss)\n\n # If at save interval => save generated image samples\n if epoch % print_every_n_batches == 0:\n self.sample_images(run_folder)\n self.model.save_weights(os.path.join(run_folder, 
'weights/weights-%d.h5' % (epoch)))\n self.model.save_weights(os.path.join(run_folder, 'weights/weights.h5'))\n self.save_model(run_folder)\n \n\n self.epoch+=1\n\n\n def sample_images(self, run_folder):\n r, c = 5, 5\n noise = np.random.normal(0, 1, (r * c, self.z_dim))\n gen_imgs = self.generator.predict(noise)\n\n #Rescale images 0 - 1\n\n gen_imgs = 0.5 * (gen_imgs + 1)\n gen_imgs = np.clip(gen_imgs, 0, 1)\n\n fig, axs = plt.subplots(r, c, figsize=(15,15))\n cnt = 0\n\n for i in range(r):\n for j in range(c):\n axs[i,j].imshow(np.squeeze(gen_imgs[cnt, :,:,:]), cmap = 'gray_r')\n axs[i,j].axis('off')\n cnt += 1\n fig.savefig(os.path.join(run_folder, \"images/sample_%d.png\" % self.epoch))\n plt.close()\n\n\n\n\n \n def plot_model(self, run_folder):\n plot_model(self.model, to_file=os.path.join(run_folder ,'viz/model.png'), show_shapes = True, show_layer_names = True)\n plot_model(self.critic, to_file=os.path.join(run_folder ,'viz/critic.png'), show_shapes = True, show_layer_names = True)\n plot_model(self.generator, to_file=os.path.join(run_folder ,'viz/generator.png'), show_shapes = True, show_layer_names = True)\n\n\n\n \n def save(self, folder):\n\n with open(os.path.join(folder, 'params.pkl'), 'wb') as f:\n pickle.dump([\n self.input_dim\n , self.critic_conv_filters\n , self.critic_conv_kernel_size\n , self.critic_conv_strides\n , self.critic_batch_norm_momentum\n , self.critic_activation\n , self.critic_dropout_rate\n , self.critic_learning_rate\n , self.generator_initial_dense_layer_size\n , self.generator_upsample\n , self.generator_conv_filters\n , self.generator_conv_kernel_size\n , self.generator_conv_strides\n , self.generator_batch_norm_momentum\n , self.generator_activation\n , self.generator_dropout_rate\n , self.generator_learning_rate\n , self.optimiser\n , self.grad_weight\n , self.z_dim\n , self.batch_size\n ], f)\n\n self.plot_model(folder)\n\n def save_model(self, run_folder):\n self.model.save(os.path.join(run_folder, 'model.h5'))\n self.critic.save(os.path.join(run_folder, 'critic.h5'))\n self.generator.save(os.path.join(run_folder, 'generator.h5'))\n pickle.dump(self, open( os.path.join(run_folder, \"obj.pkl\"), \"wb\" ))\n\n def load_weights(self, filepath):\n self.model.load_weights(filepath)" ]
[ [ "numpy.clip", "numpy.squeeze", "matplotlib.pyplot.subplots", "numpy.ones", "numpy.random.normal", "matplotlib.pyplot.close", "numpy.prod", "numpy.zeros", "numpy.random.randint" ] ]
DNALuo/3Dposes
[ "c5e2ed5fea612318d7715e239176571f593ccf83" ]
[ "src/models/hg_3d.py" ]
[ "from .layers.Residual import Residual\r\nimport torch.nn as nn\r\nimport math\r\nimport ref\r\n\r\nclass Hourglass(nn.Module):\r\n def __init__(self, n, nModules, nFeats):\r\n super(Hourglass, self).__init__()\r\n self.n = n\r\n self.nModules = nModules\r\n self.nFeats = nFeats\r\n \r\n _up1_, _low1_, _low2_, _low3_ = [], [], [], []\r\n for j in range(self.nModules):\r\n _up1_.append(Residual(self.nFeats, self.nFeats))\r\n self.low1 = nn.MaxPool2d(kernel_size = 2, stride = 2)\r\n for j in range(self.nModules):\r\n _low1_.append(Residual(self.nFeats, self.nFeats))\r\n \r\n if self.n > 1:\r\n self.low2 = Hourglass(n - 1, self.nModules, self.nFeats)\r\n else:\r\n for j in range(self.nModules):\r\n _low2_.append(Residual(self.nFeats, self.nFeats))\r\n self.low2_ = nn.ModuleList(_low2_)\r\n \r\n for j in range(self.nModules):\r\n _low3_.append(Residual(self.nFeats, self.nFeats))\r\n \r\n self.up1_ = nn.ModuleList(_up1_)\r\n self.low1_ = nn.ModuleList(_low1_)\r\n self.low3_ = nn.ModuleList(_low3_)\r\n \r\n self.up2 = nn.Upsample(scale_factor = 2)\r\n \r\n def forward(self, x):\r\n up1 = x\r\n for j in range(self.nModules):\r\n up1 = self.up1_[j](up1)\r\n \r\n low1 = self.low1(x)\r\n for j in range(self.nModules):\r\n low1 = self.low1_[j](low1)\r\n \r\n if self.n > 1:\r\n low2 = self.low2(low1)\r\n else:\r\n low2 = low1\r\n for j in range(self.nModules):\r\n low2 = self.low2_[j](low2)\r\n \r\n low3 = low2\r\n for j in range(self.nModules):\r\n low3 = self.low3_[j](low3)\r\n up2 = self.up2(low3)\r\n \r\n return up1 + up2\r\n\r\nclass HourglassNet3D(nn.Module):\r\n def __init__(self, nStack, nModules, nFeats, nRegModules):\r\n super(HourglassNet3D, self).__init__()\r\n self.nStack = nStack\r\n self.nModules = nModules\r\n self.nFeats = nFeats\r\n self.nRegModules = nRegModules\r\n self.conv1_ = nn.Conv2d(3, 64, bias = True, kernel_size = 7, stride = 2, padding = 3)\r\n self.bn1 = nn.BatchNorm2d(64)\r\n self.relu = nn.ReLU(inplace = True)\r\n self.r1 = Residual(64, 128)\r\n self.maxpool = nn.MaxPool2d(kernel_size = 2, stride = 2)\r\n self.r4 = Residual(128, 128)\r\n self.r5 = Residual(128, self.nFeats)\r\n \r\n _hourglass, _Residual, _lin_, _tmpOut, _ll_, _tmpOut_, _reg_ = [], [], [], [], [], [], []\r\n for i in range(self.nStack):\r\n _hourglass.append(Hourglass(4, self.nModules, self.nFeats))\r\n for j in range(self.nModules):\r\n _Residual.append(Residual(self.nFeats, self.nFeats))\r\n lin = nn.Sequential(nn.Conv2d(self.nFeats, self.nFeats, bias = True, kernel_size = 1, stride = 1), \r\n nn.BatchNorm2d(self.nFeats), self.relu)\r\n _lin_.append(lin)\r\n _tmpOut.append(nn.Conv2d(self.nFeats, ref.nJoints, bias = True, kernel_size = 1, stride = 1))\r\n _ll_.append(nn.Conv2d(self.nFeats, self.nFeats, bias = True, kernel_size = 1, stride = 1))\r\n _tmpOut_.append(nn.Conv2d(ref.nJoints, self.nFeats, bias = True, kernel_size = 1, stride = 1))\r\n\r\n for i in range(4):\r\n for j in range(self.nRegModules):\r\n _reg_.append(Residual(self.nFeats, self.nFeats))\r\n \r\n self.hourglass = nn.ModuleList(_hourglass)\r\n self.Residual = nn.ModuleList(_Residual)\r\n self.lin_ = nn.ModuleList(_lin_)\r\n self.tmpOut = nn.ModuleList(_tmpOut)\r\n self.ll_ = nn.ModuleList(_ll_)\r\n self.tmpOut_ = nn.ModuleList(_tmpOut_)\r\n self.reg_ = nn.ModuleList(_reg_)\r\n \r\n self.reg = nn.Linear(4 * 4 * self.nFeats, ref.nJoints)\r\n \r\n def forward(self, x):\r\n x = self.conv1_(x)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n x = self.r1(x)\r\n x = self.maxpool(x)\r\n x = self.r4(x)\r\n x = self.r5(x)\r\n \r\n out = []\r\n 
\r\n for i in range(self.nStack):\r\n hg = self.hourglass[i](x)\r\n ll = hg\r\n for j in range(self.nModules):\r\n ll = self.Residual[i * self.nModules + j](ll)\r\n ll = self.lin_[i](ll)\r\n tmpOut = self.tmpOut[i](ll)\r\n out.append(tmpOut)\r\n \r\n ll_ = self.ll_[i](ll)\r\n tmpOut_ = self.tmpOut_[i](tmpOut)\r\n x = x + ll_ + tmpOut_\r\n \r\n for i in range(4):\r\n for j in range(self.nRegModules):\r\n x = self.reg_[i * self.nRegModules + j](x)\r\n x = self.maxpool(x)\r\n \r\n x = x.view(x.size(0), -1)\r\n reg = self.reg(x)\r\n out.append(reg)\r\n \r\n return out\r\n\r\n" ]
[ [ "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.Upsample", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
csdms/ivy
[ "862fc8bafa665864ceae25c4ead9e376ffe175cb" ]
[ "lessons/best-practices/boulder_dem.py" ]
[ "\"\"\"An example of reading topographical data from a file and displaying it.\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ntopo_file = \"../../data/topo.asc\"\n\n\ndef read():\n try:\n topo = pd.read_csv(topo_file, header=None)\n except IOError:\n print(\"IOError: file '{}' cannot be read\".format(topo_file))\n else:\n return topo\n\n\ndef display(data, show=False, outfile=\"boulder_dem.png\"):\n fig, ax = plt.subplots()\n elev = ax.imshow(data, cmap=\"jet\")\n fig.colorbar(elev, label=\"Elevation (m)\")\n plt.title(\"Boulder Topography\")\n\n if show is True:\n plt.show()\n else:\n plt.savefig(outfile, dpi=96)\n plt.close()\n\n\nif __name__ == \"__main__\":\n topo = read()\n if topo is not None:\n display(topo)\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.show" ] ]