repo_name (string, length 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
luhuaei/xlnet
|
[
"ef415d43ab999165d5ac284962edf0675120e55c"
] |
[
"data_utils.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport random\n\nfrom absl import flags\nimport absl.logging as _logging # pylint: disable=unused-import\n\nimport numpy as np\n\n\nimport tensorflow as tf\n\nfrom prepro_utils import preprocess_text, encode_ids\nimport sentencepiece as spm\n\n\nspecial_symbols = {\n \"<unk>\" : 0,\n \"<s>\" : 1,\n \"</s>\" : 2,\n \"<cls>\" : 3,\n \"<sep>\" : 4,\n \"<pad>\" : 5,\n \"<mask>\" : 6,\n \"<eod>\" : 7,\n \"<eop>\" : 8,\n}\n\nVOCAB_SIZE = 32000\nUNK_ID = special_symbols[\"<unk>\"]\nCLS_ID = special_symbols[\"<cls>\"]\nSEP_ID = special_symbols[\"<sep>\"]\nMASK_ID = special_symbols[\"<mask>\"]\nEOD_ID = special_symbols[\"<eod>\"]\n\n\ndef _int64_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n\ndef _float_feature(values):\n return tf.train.Feature(float_list=tf.train.FloatList(value=values))\n\n\ndef format_filename(prefix, bsz_per_host, seq_len, bi_data, suffix,\n mask_alpha=5, mask_beta=1, reuse_len=None, uncased=False,\n fixed_num_predict=None):\n \"\"\"docs.\"\"\"\n if reuse_len is None:\n reuse_len_str = \"\"\n else:\n reuse_len_str = \"reuse-{}.\".format(reuse_len)\n if not uncased:\n uncased_str = \"\"\n else:\n uncased_str = \"uncased.\"\n if bi_data:\n bi_data_str = \"bi\"\n else:\n bi_data_str = \"uni\"\n if fixed_num_predict is not None:\n fnp_str = \"fnp-{}.\".format(fixed_num_predict)\n else:\n fnp_str = \"\"\n\n file_name = \"{}.bsz-{}.seqlen-{}.{}{}{}.alpha-{}.beta-{}.{}{}\".format(\n prefix, bsz_per_host, seq_len, reuse_len_str, uncased_str, bi_data_str,\n mask_alpha, mask_beta, fnp_str, suffix)\n\n return file_name\n\n\ndef _create_data(idx, input_paths):\n # Load sentence-piece model\n sp = spm.SentencePieceProcessor()\n sp.Load(FLAGS.sp_path)\n\n input_shards = []\n total_line_cnt = 0\n for input_path in input_paths:\n input_data, sent_ids = [], []\n sent_id, line_cnt = True, 0\n tf.compat.v1.logging.info(\"Processing %s\", input_path)\n for line in tf.io.gfile.Open(input_path):\n if line_cnt % 100000 == 0:\n tf.compat.v1.logging.info(\"Loading line %d\", line_cnt)\n line_cnt += 1\n\n if not line.strip():\n if FLAGS.use_eod:\n sent_id = not sent_id\n cur_sent = [EOD_ID]\n else:\n continue\n else:\n if FLAGS.from_raw_text:\n cur_sent = preprocess_text(line.strip(), lower=FLAGS.uncased)\n cur_sent = encode_ids(sp, cur_sent)\n else:\n cur_sent = list(map(int, line.strip().split()))\n\n input_data.extend(cur_sent)\n sent_ids.extend([sent_id] * len(cur_sent))\n sent_id = not sent_id\n\n tf.compat.v1.logging.info(\"Finish with line %d\", line_cnt)\n if line_cnt == 0:\n continue\n\n input_data = np.array(input_data, dtype=np.int64)\n sent_ids = np.array(sent_ids, dtype=np.bool)\n\n total_line_cnt += line_cnt\n input_shards.append((input_data, sent_ids))\n\n tf.compat.v1.logging.info(\"[Task %d] Total number line: %d\", idx, total_line_cnt)\n\n tfrecord_dir = os.path.join(FLAGS.save_dir, \"tfrecords\")\n\n filenames, num_batch = [], 0\n\n # Randomly shuffle input shards (with a fixed but distinct random seed)\n np.random.seed(100 * FLAGS.task + FLAGS.pass_id)\n\n perm_indices = np.random.permutation(len(input_shards))\n tf.compat.v1.logging.info(\"Using perm indices %s for pass %d\",\n perm_indices.tolist(), FLAGS.pass_id)\n\n input_data_list, sent_ids_list = [], []\n prev_sent_id = None\n for perm_idx in perm_indices:\n input_data, sent_ids = input_shards[perm_idx]\n # make sure the `send_ids[0] == not prev_sent_id`\n if prev_sent_id is not None and sent_ids[0] == prev_sent_id:\n sent_ids = np.logical_not(sent_ids)\n\n # append to temporary list\n input_data_list.append(input_data)\n sent_ids_list.append(sent_ids)\n\n # update `prev_sent_id`\n prev_sent_id = sent_ids[-1]\n\n input_data = np.concatenate(input_data_list)\n sent_ids = np.concatenate(sent_ids_list)\n\n file_name, cur_num_batch = create_tfrecords(\n save_dir=tfrecord_dir,\n basename=\"{}-{}-{}\".format(FLAGS.split, idx, FLAGS.pass_id),\n data=[input_data, sent_ids],\n bsz_per_host=FLAGS.bsz_per_host,\n seq_len=FLAGS.seq_len,\n bi_data=FLAGS.bi_data,\n sp=sp,\n )\n\n filenames.append(file_name)\n num_batch += cur_num_batch\n\n record_info = {\n \"filenames\": filenames,\n \"num_batch\": num_batch\n }\n\n return record_info\n\n\ndef create_data(_):\n # Validate FLAGS\n assert FLAGS.bsz_per_host % FLAGS.num_core_per_host == 0\n if not FLAGS.use_tpu:\n FLAGS.num_core_per_host = 1 # forced to be one\n\n # Make workdirs\n if not tf.io.gfile.Exists(FLAGS.save_dir):\n tf.io.gfile.MakeDirs(FLAGS.save_dir)\n\n tfrecord_dir = os.path.join(FLAGS.save_dir, \"tfrecords\")\n if not tf.io.gfile.Exists(tfrecord_dir):\n tf.io.gfile.MakeDirs(tfrecord_dir)\n\n # Create and dump corpus_info from task 0\n if FLAGS.task == 0:\n corpus_info = {\n \"vocab_size\": VOCAB_SIZE,\n \"bsz_per_host\": FLAGS.bsz_per_host,\n \"num_core_per_host\": FLAGS.num_core_per_host,\n \"seq_len\": FLAGS.seq_len,\n \"reuse_len\": FLAGS.reuse_len,\n \"uncased\": FLAGS.uncased,\n \"bi_data\": FLAGS.bi_data,\n \"mask_alpha\": FLAGS.mask_alpha,\n \"mask_beta\": FLAGS.mask_beta,\n \"num_predict\": FLAGS.num_predict,\n \"use_eod\": FLAGS.use_eod,\n \"sp_path\": FLAGS.sp_path,\n \"input_glob\": FLAGS.input_glob,\n }\n corpus_info_path = os.path.join(FLAGS.save_dir, \"corpus_info.json\")\n with tf.io.gfile.Open(corpus_info_path, \"w\") as fp:\n json.dump(corpus_info, fp)\n\n # Interleavely split the work into FLAGS.num_task splits\n file_paths = sorted(tf.io.gfile.Glob(FLAGS.input_glob))\n tf.compat.v1.logging.info(\"Use glob: %s\", FLAGS.input_glob)\n tf.compat.v1.logging.info(\"Find %d files: %s\", len(file_paths), file_paths)\n\n task_file_paths = file_paths[FLAGS.task::FLAGS.num_task]\n if not task_file_paths:\n tf.compat.v1.logging.info(\"Exit: task %d has no file to process.\", FLAGS.task)\n return\n\n tf.compat.v1.logging.info(\"Task %d process %d files: %s\",\n FLAGS.task, len(task_file_paths), task_file_paths)\n record_info = _create_data(FLAGS.task, task_file_paths)\n\n record_prefix = \"record_info-{}-{}-{}\".format(\n FLAGS.split, FLAGS.task, FLAGS.pass_id)\n record_name = format_filename(\n prefix=record_prefix,\n bsz_per_host=FLAGS.bsz_per_host,\n seq_len=FLAGS.seq_len,\n mask_alpha=FLAGS.mask_alpha,\n mask_beta=FLAGS.mask_beta,\n reuse_len=FLAGS.reuse_len,\n bi_data=FLAGS.bi_data,\n suffix=\"json\",\n uncased=FLAGS.uncased,\n fixed_num_predict=FLAGS.num_predict)\n record_info_path = os.path.join(tfrecord_dir, record_name)\n\n with tf.io.gfile.Open(record_info_path, \"w\") as fp:\n json.dump(record_info, fp)\n\n\ndef batchify(data, bsz_per_host, sent_ids=None):\n num_step = len(data) // bsz_per_host\n data = data[:bsz_per_host * num_step]\n data = data.reshape(bsz_per_host, num_step)\n if sent_ids is not None:\n sent_ids = sent_ids[:bsz_per_host * num_step]\n sent_ids = sent_ids.reshape(bsz_per_host, num_step)\n\n if sent_ids is not None:\n return data, sent_ids\n return data\n\n\ndef _split_a_and_b(data, sent_ids, begin_idx, tot_len, extend_target=False):\n \"\"\"Split two segments from `data` starting from the index `begin_idx`.\"\"\"\n\n data_len = data.shape[0]\n if begin_idx + tot_len >= data_len:\n tf.compat.v1.logging.info(\"[_split_a_and_b] returns None: \"\n \"begin_idx %d + tot_len %d >= data_len %d\",\n begin_idx, tot_len, data_len)\n return None\n\n end_idx = begin_idx + 1\n cut_points = []\n while end_idx < data_len:\n if sent_ids[end_idx] != sent_ids[end_idx - 1]:\n if end_idx - begin_idx >= tot_len: break\n cut_points.append(end_idx)\n end_idx += 1\n\n a_begin = begin_idx\n if len(cut_points) == 0 or random.random() < 0.5:\n label = 0\n if len(cut_points) == 0:\n a_end = end_idx\n else:\n a_end = random.choice(cut_points)\n\n b_len = max(1, tot_len - (a_end - a_begin))\n # (zihang): `data_len - 1` to account for extend_target\n b_begin = random.randint(0, data_len - 1 - b_len)\n b_end = b_begin + b_len\n while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]:\n b_begin -= 1\n # (zihang): `data_len - 1` to account for extend_target\n while b_end < data_len - 1 and sent_ids[b_end - 1] == sent_ids[b_end]:\n b_end += 1\n\n new_begin = a_end\n else:\n label = 1\n a_end = random.choice(cut_points)\n b_begin = a_end\n b_end = end_idx\n\n new_begin = b_end\n\n while a_end - a_begin + b_end - b_begin > tot_len:\n if a_end - a_begin > b_end - b_begin:\n # delete the right side only for the LM objective\n a_end -= 1\n else:\n b_end -= 1\n\n ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin]\n\n if extend_target:\n if a_end >= data_len or b_end >= data_len:\n tf.compat.v1.logging.info(\"[_split_a_and_b] returns None: \"\n \"a_end %d or b_end %d >= data_len %d\",\n a_end, b_end, data_len)\n return None\n a_target = data[a_begin + 1: a_end + 1]\n b_target = data[b_begin: b_end + 1]\n ret.extend([a_target, b_target])\n\n return ret\n\n\ndef _is_start_piece(piece):\n special_pieces = set(list('!\"#$%&\\\"()*+,-./:;?@[\\\\]^_`{|}~'))\n if (piece.startswith(\"▁\") or piece.startswith(\"<\")\n or piece in special_pieces):\n return True\n else:\n return False\n\n\ndef _sample_mask(sp, seg, reverse=False, max_gram=5, goal_num_predict=None):\n \"\"\"Sample `goal_num_predict` tokens for partial prediction.\n About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens.\"\"\"\n\n seg_len = len(seg)\n mask = np.array([False] * seg_len, dtype=np.bool)\n\n num_predict = 0\n\n ngrams = np.arange(1, max_gram + 1, dtype=np.int64)\n pvals = 1. / np.arange(1, max_gram + 1)\n pvals /= pvals.sum(keepdims=True)\n\n if reverse:\n seg = np.flip(seg, 0)\n\n cur_len = 0\n while cur_len < seg_len:\n if goal_num_predict is not None and num_predict >= goal_num_predict: break\n\n n = np.random.choice(ngrams, p=pvals)\n if goal_num_predict is not None:\n n = min(n, goal_num_predict - num_predict)\n ctx_size = (n * FLAGS.mask_alpha) // FLAGS.mask_beta\n l_ctx = np.random.choice(ctx_size)\n r_ctx = ctx_size - l_ctx\n\n # Find the start position of a complete token\n beg = cur_len + l_ctx\n while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())):\n beg += 1\n if beg >= seg_len:\n break\n\n # Find the end position of the n-gram (start pos of the n+1-th gram)\n end = beg + 1\n cnt_ngram = 1\n while end < seg_len:\n if _is_start_piece(sp.IdToPiece(seg[beg].item())):\n cnt_ngram += 1\n if cnt_ngram > n:\n break\n end += 1\n if end >= seg_len:\n break\n\n # Update\n mask[beg:end] = True\n num_predict += end - beg\n\n cur_len = end + r_ctx\n\n while goal_num_predict is not None and num_predict < goal_num_predict:\n i = np.random.randint(seg_len)\n if not mask[i]:\n mask[i] = True\n num_predict += 1\n\n if reverse:\n mask = np.flip(mask, 0)\n\n return mask\n\n\ndef create_tfrecords(save_dir, basename, data, bsz_per_host, seq_len,\n bi_data, sp):\n data, sent_ids = data[0], data[1]\n\n num_core = FLAGS.num_core_per_host\n bsz_per_core = bsz_per_host // num_core\n\n if bi_data:\n assert bsz_per_host % (2 * FLAGS.num_core_per_host) == 0\n fwd_data, fwd_sent_ids = batchify(data, bsz_per_host // 2, sent_ids)\n\n fwd_data = fwd_data.reshape(num_core, 1, bsz_per_core // 2, -1)\n fwd_sent_ids = fwd_sent_ids.reshape(num_core, 1, bsz_per_core // 2, -1)\n\n bwd_data = fwd_data[:, :, :, ::-1]\n bwd_sent_ids = fwd_sent_ids[:, :, :, ::-1]\n\n data = np.concatenate(\n [fwd_data, bwd_data], 1).reshape(bsz_per_host, -1)\n sent_ids = np.concatenate(\n [fwd_sent_ids, bwd_sent_ids], 1).reshape(bsz_per_host, -1)\n else:\n data, sent_ids = batchify(data, bsz_per_host, sent_ids)\n\n tf.compat.v1.logging.info(\"Raw data shape %s.\", data.shape)\n\n file_name = format_filename(\n prefix=basename,\n bsz_per_host=bsz_per_host,\n seq_len=seq_len,\n bi_data=bi_data,\n suffix=\"tfrecords\",\n mask_alpha=FLAGS.mask_alpha,\n mask_beta=FLAGS.mask_beta,\n reuse_len=FLAGS.reuse_len,\n uncased=FLAGS.uncased,\n fixed_num_predict=FLAGS.num_predict\n )\n save_path = os.path.join(save_dir, file_name)\n record_writer = tf.python_io.TFRecordWriter(save_path)\n tf.compat.v1.logging.info(\"Start writing %s.\", save_path)\n\n num_batch = 0\n reuse_len = FLAGS.reuse_len\n\n # [sep] x 2 + [cls]\n assert reuse_len < seq_len - 3\n\n data_len = data.shape[1]\n sep_array = np.array([SEP_ID], dtype=np.int64)\n cls_array = np.array([CLS_ID], dtype=np.int64)\n\n i = 0\n while i + seq_len <= data_len:\n if num_batch % 500 == 0:\n tf.compat.v1.logging.info(\"Processing batch %d\", num_batch)\n\n all_ok = True\n features = []\n for idx in range(bsz_per_host):\n inp = data[idx, i: i + reuse_len]\n tgt = data[idx, i + 1: i + reuse_len + 1]\n\n results = _split_a_and_b(\n data[idx],\n sent_ids[idx],\n begin_idx=i + reuse_len,\n tot_len=seq_len - reuse_len - 3,\n extend_target=True)\n if results is None:\n tf.compat.v1.logging.info(\"Break out with seq idx %d\", i)\n all_ok = False\n break\n\n # unpack the results\n (a_data, b_data, label, _, a_target, b_target) = tuple(results)\n\n # sample ngram spans to predict\n reverse = bi_data and (idx // (bsz_per_core // 2)) % 2 == 1\n if FLAGS.num_predict is None:\n num_predict_0 = num_predict_1 = None\n else:\n num_predict_1 = FLAGS.num_predict // 2\n num_predict_0 = FLAGS.num_predict - num_predict_1\n mask_0 = _sample_mask(sp, inp, reverse=reverse,\n goal_num_predict=num_predict_0)\n mask_1 = _sample_mask(sp, np.concatenate([a_data, sep_array, b_data,\n sep_array, cls_array]),\n reverse=reverse, goal_num_predict=num_predict_1)\n\n # concatenate data\n cat_data = np.concatenate([inp, a_data, sep_array, b_data,\n sep_array, cls_array])\n seg_id = ([0] * (reuse_len + a_data.shape[0]) + [0] +\n [1] * b_data.shape[0] + [1] + [2])\n assert cat_data.shape[0] == seq_len\n assert mask_0.shape[0] == seq_len // 2\n assert mask_1.shape[0] == seq_len // 2\n\n # the last two CLS's are not used, just for padding purposes\n tgt = np.concatenate([tgt, a_target, b_target, cls_array, cls_array])\n assert tgt.shape[0] == seq_len\n\n is_masked = np.concatenate([mask_0, mask_1], 0)\n if FLAGS.num_predict is not None:\n assert np.sum(is_masked) == FLAGS.num_predict\n\n feature = {\n \"input\": _int64_feature(cat_data),\n \"is_masked\": _int64_feature(is_masked),\n \"target\": _int64_feature(tgt),\n \"seg_id\": _int64_feature(seg_id),\n \"label\": _int64_feature([label]),\n }\n features.append(feature)\n\n if all_ok:\n assert len(features) == bsz_per_host\n for feature in features:\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n record_writer.write(example.SerializeToString())\n num_batch += 1\n else:\n break\n\n i += reuse_len\n\n record_writer.close()\n tf.compat.v1.logging.info(\"Done writing %s. Num of batches: %d\", save_path, num_batch)\n\n return save_path, num_batch\n\n\n################\n# get_input_fn #\n################\ndef _convert_example(example, use_bfloat16):\n \"\"\"Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.\"\"\"\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n if val.dtype == tf.int64:\n val = tf.cast(val, tf.int32)\n if use_bfloat16 and val.dtype == tf.float32:\n val = tf.cast(val, tf.bfloat16)\n\n example[key] = val\n\n\ndef parse_files_to_dataset(parser, file_names, split, num_batch, num_hosts,\n host_id, num_core_per_host, bsz_per_core):\n # list of file pathes\n num_files = len(file_names)\n num_files_per_host = num_files // num_hosts\n my_start_file_id = host_id * num_files_per_host\n my_end_file_id = (host_id + 1) * num_files_per_host\n if host_id == num_hosts - 1:\n my_end_file_id = num_files\n file_paths = file_names[my_start_file_id: my_end_file_id]\n tf.compat.v1.logging.info(\"Host %d handles %d files\", host_id, len(file_paths))\n\n assert split == \"train\"\n dataset = tf.data.Dataset.from_tensor_slices(file_paths)\n\n # file-level shuffle\n if len(file_paths) > 1:\n dataset = dataset.shuffle(len(file_paths))\n\n # Note: we cannot perform sample-level shuffle here because this will violate\n # the consecutive requirement of data stream.\n dataset = tf.data.TFRecordDataset(dataset)\n\n # (zihang): since we are doing online preprocessing, the parsed result of\n # the same input at each time will be different. Thus, cache processed data\n # is not helpful. It will use a lot of memory and lead to contrainer OOM.\n # So, change to cache non-parsed raw data instead.\n dataset = dataset.cache().map(parser).repeat()\n dataset = dataset.batch(bsz_per_core, drop_remainder=True)\n dataset = dataset.prefetch(num_core_per_host * bsz_per_core)\n\n return dataset\n\n\ndef _local_perm(inputs, targets, is_masked, perm_size, seq_len):\n \"\"\"\n Sample a permutation of the factorization order, and create an\n attention mask accordingly.\n\n Args:\n inputs: int64 Tensor in shape [seq_len], input ids.\n targets: int64 Tensor in shape [seq_len], target ids.\n is_masked: bool Tensor in shape [seq_len]. True means being selected\n for partial prediction.\n perm_size: the length of longest permutation. Could be set to be reuse_len.\n Should not be larger than reuse_len or there will be data leaks.\n seq_len: int, sequence length.\n \"\"\"\n\n # Generate permutation indices\n index = tf.range(seq_len, dtype=tf.int64)\n index = tf.transpose(tf.reshape(index, [-1, perm_size]))\n index = tf.random_shuffle(index)\n index = tf.reshape(tf.transpose(index), [-1])\n\n # `perm_mask` and `target_mask`\n # non-functional tokens\n non_func_tokens = tf.logical_not(tf.logical_or(\n tf.equal(inputs, SEP_ID),\n tf.equal(inputs, CLS_ID)))\n\n non_mask_tokens = tf.logical_and(tf.logical_not(is_masked), non_func_tokens)\n masked_or_func_tokens = tf.logical_not(non_mask_tokens)\n\n # Set the permutation indices of non-masked (& non-funcional) tokens to the\n # smallest index (-1):\n # (1) they can be seen by all other positions\n # (2) they cannot see masked positions, so there won\"t be information leak\n smallest_index = -tf.ones([seq_len], dtype=tf.int64)\n rev_index = tf.where(non_mask_tokens, smallest_index, index)\n\n # Create `target_mask`: non-funcional and maksed tokens\n # 1: use mask as input and have loss\n # 0: use token (or [SEP], [CLS]) as input and do not have loss\n target_tokens = tf.logical_and(masked_or_func_tokens, non_func_tokens)\n target_mask = tf.cast(target_tokens, tf.float32)\n\n # Create `perm_mask`\n # `target_tokens` cannot see themselves\n self_rev_index = tf.where(target_tokens, rev_index, rev_index + 1)\n\n # 1: cannot attend if i <= j and j is not non-masked (masked_or_func_tokens)\n # 0: can attend if i > j or j is non-masked\n perm_mask = tf.logical_and(\n self_rev_index[:, None] <= rev_index[None, :],\n masked_or_func_tokens)\n perm_mask = tf.cast(perm_mask, tf.float32)\n\n # new target: [next token] for LM and [curr token] (self) for PLM\n new_targets = tf.concat([inputs[0: 1], targets[: -1]],\n axis=0)\n\n # construct inputs_k\n inputs_k = inputs\n\n # construct inputs_q\n inputs_q = target_mask\n\n return perm_mask, new_targets, target_mask, inputs_k, inputs_q\n\n\ndef get_dataset(params, num_hosts, num_core_per_host, split, file_names,\n num_batch, seq_len, reuse_len, perm_size, mask_alpha,\n mask_beta, use_bfloat16=False, num_predict=None):\n\n bsz_per_core = params[\"batch_size\"]\n if num_hosts > 1:\n host_id = params[\"context\"].current_host\n else:\n host_id = 0\n\n #### Function used to parse tfrecord\n def parser(record):\n \"\"\"function used to parse tfrecord.\"\"\"\n\n record_spec = {\n \"input\": tf.FixedLenFeature([seq_len], tf.int64),\n \"target\": tf.FixedLenFeature([seq_len], tf.int64),\n \"seg_id\": tf.FixedLenFeature([seq_len], tf.int64),\n \"label\": tf.FixedLenFeature([1], tf.int64),\n \"is_masked\": tf.FixedLenFeature([seq_len], tf.int64),\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n inputs = example.pop(\"input\")\n target = example.pop(\"target\")\n is_masked = tf.cast(example.pop(\"is_masked\"), tf.bool)\n\n non_reuse_len = seq_len - reuse_len\n assert perm_size <= reuse_len and perm_size <= non_reuse_len\n\n perm_mask_0, target_0, target_mask_0, input_k_0, input_q_0 = _local_perm(\n inputs[:reuse_len],\n target[:reuse_len],\n is_masked[:reuse_len],\n perm_size,\n reuse_len)\n\n perm_mask_1, target_1, target_mask_1, input_k_1, input_q_1 = _local_perm(\n inputs[reuse_len:],\n target[reuse_len:],\n is_masked[reuse_len:],\n perm_size,\n non_reuse_len)\n\n perm_mask_0 = tf.concat([perm_mask_0, tf.ones([reuse_len, non_reuse_len])],\n axis=1)\n perm_mask_1 = tf.concat([tf.zeros([non_reuse_len, reuse_len]), perm_mask_1],\n axis=1)\n perm_mask = tf.concat([perm_mask_0, perm_mask_1], axis=0)\n target = tf.concat([target_0, target_1], axis=0)\n target_mask = tf.concat([target_mask_0, target_mask_1], axis=0)\n input_k = tf.concat([input_k_0, input_k_1], axis=0)\n input_q = tf.concat([input_q_0, input_q_1], axis=0)\n\n if num_predict is not None:\n indices = tf.range(seq_len, dtype=tf.int64)\n bool_target_mask = tf.cast(target_mask, tf.bool)\n indices = tf.boolean_mask(indices, bool_target_mask)\n\n ##### extra padding due to CLS/SEP introduced after prepro\n actual_num_predict = tf.shape(indices)[0]\n pad_len = num_predict - actual_num_predict\n\n ##### target_mapping\n target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32)\n paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype)\n target_mapping = tf.concat([target_mapping, paddings], axis=0)\n example[\"target_mapping\"] = tf.reshape(target_mapping,\n [num_predict, seq_len])\n\n ##### target\n target = tf.boolean_mask(target, bool_target_mask)\n paddings = tf.zeros([pad_len], dtype=target.dtype)\n target = tf.concat([target, paddings], axis=0)\n example[\"target\"] = tf.reshape(target, [num_predict])\n\n ##### target mask\n target_mask = tf.concat(\n [tf.ones([actual_num_predict], dtype=tf.float32),\n tf.zeros([pad_len], dtype=tf.float32)],\n axis=0)\n example[\"target_mask\"] = tf.reshape(target_mask, [num_predict])\n else:\n example[\"target\"] = tf.reshape(target, [seq_len])\n example[\"target_mask\"] = tf.reshape(target_mask, [seq_len])\n\n # reshape back to fixed shape\n example[\"perm_mask\"] = tf.reshape(perm_mask, [seq_len, seq_len])\n example[\"input_k\"] = tf.reshape(input_k, [seq_len])\n example[\"input_q\"] = tf.reshape(input_q, [seq_len])\n\n _convert_example(example, use_bfloat16)\n\n for k, v in example.items():\n tf.compat.v1.logging.info(\"%s: %s\", k, v)\n\n return example\n\n # Get dataset\n dataset = parse_files_to_dataset(\n parser=parser,\n file_names=file_names,\n split=split,\n num_batch=num_batch,\n num_hosts=num_hosts,\n host_id=host_id,\n num_core_per_host=num_core_per_host,\n bsz_per_core=bsz_per_core)\n\n return dataset\n\n\ndef get_input_fn(\n tfrecord_dir,\n split,\n bsz_per_host,\n seq_len,\n reuse_len,\n bi_data,\n num_hosts=1,\n num_core_per_host=1,\n perm_size=None,\n mask_alpha=None,\n mask_beta=None,\n uncased=False,\n num_passes=None,\n use_bfloat16=False,\n num_predict=None):\n\n # Merge all record infos into a single one\n record_glob_base = format_filename(\n prefix=\"record_info-{}-*\".format(split),\n bsz_per_host=bsz_per_host,\n seq_len=seq_len,\n bi_data=bi_data,\n suffix=\"json\",\n mask_alpha=mask_alpha,\n mask_beta=mask_beta,\n reuse_len=reuse_len,\n uncased=uncased,\n fixed_num_predict=num_predict)\n\n record_info = {\"num_batch\": 0, \"filenames\": []}\n\n tfrecord_dirs = tfrecord_dir.split(\",\")\n tf.compat.v1.logging.info(\"Use the following tfrecord dirs: %s\", tfrecord_dirs)\n\n for idx, record_dir in enumerate(tfrecord_dirs):\n record_glob = os.path.join(record_dir, record_glob_base)\n tf.compat.v1.logging.info(\"[%d] Record glob: %s\", idx, record_glob)\n\n record_paths = sorted(tf.io.gfile.Glob(record_glob))\n tf.compat.v1.logging.info(\"[%d] Num of record info path: %d\",\n idx, len(record_paths))\n\n cur_record_info = {\"num_batch\": 0, \"filenames\": []}\n\n for record_info_path in record_paths:\n if num_passes is not None:\n record_info_name = os.path.basename(record_info_path)\n fields = record_info_name.split(\".\")[0].split(\"-\")\n pass_id = int(fields[-1])\n if len(fields) == 5 and pass_id >= num_passes:\n tf.compat.v1.logging.info(\"Skip pass %d: %s\", pass_id, record_info_name)\n continue\n\n with tf.io.gfile.Open(record_info_path, \"r\") as fp:\n info = json.load(fp)\n if num_passes is not None:\n eff_num_passes = min(num_passes, len(info[\"filenames\"]))\n ratio = eff_num_passes / len(info[\"filenames\"])\n cur_record_info[\"num_batch\"] += int(info[\"num_batch\"] * ratio)\n cur_record_info[\"filenames\"] += info[\"filenames\"][:eff_num_passes]\n else:\n cur_record_info[\"num_batch\"] += info[\"num_batch\"]\n cur_record_info[\"filenames\"] += info[\"filenames\"]\n\n # overwrite directory for `cur_record_info`\n new_filenames = []\n for filename in cur_record_info[\"filenames\"]:\n basename = os.path.basename(filename)\n new_filename = os.path.join(record_dir, basename)\n new_filenames.append(new_filename)\n cur_record_info[\"filenames\"] = new_filenames\n\n tf.compat.v1.logging.info(\"[Dir %d] Number of chosen batches: %s\",\n idx, cur_record_info[\"num_batch\"])\n tf.compat.v1.logging.info(\"[Dir %d] Number of chosen files: %s\",\n idx, len(cur_record_info[\"filenames\"]))\n tf.compat.v1.logging.info(cur_record_info[\"filenames\"])\n\n # add `cur_record_info` to global `record_info`\n record_info[\"num_batch\"] += cur_record_info[\"num_batch\"]\n record_info[\"filenames\"] += cur_record_info[\"filenames\"]\n\n tf.compat.v1.logging.info(\"Total number of batches: %d\",\n record_info[\"num_batch\"])\n tf.compat.v1.logging.info(\"Total number of files: %d\",\n len(record_info[\"filenames\"]))\n tf.compat.v1.logging.info(record_info[\"filenames\"])\n\n def input_fn(params):\n \"\"\"docs.\"\"\"\n assert params[\"batch_size\"] * num_core_per_host == bsz_per_host\n\n dataset = get_dataset(\n params=params,\n num_hosts=num_hosts,\n num_core_per_host=num_core_per_host,\n split=split,\n file_names=record_info[\"filenames\"],\n num_batch=record_info[\"num_batch\"],\n seq_len=seq_len,\n reuse_len=reuse_len,\n perm_size=perm_size,\n mask_alpha=mask_alpha,\n mask_beta=mask_beta,\n use_bfloat16=use_bfloat16,\n num_predict=num_predict)\n\n return dataset\n\n return input_fn, record_info\n\n\nif __name__ == \"__main__\":\n FLAGS = flags.FLAGS\n flags.DEFINE_bool(\"use_tpu\", True, help=\"whether to use TPUs\")\n flags.DEFINE_integer(\"bsz_per_host\", 32, help=\"batch size per host.\")\n flags.DEFINE_integer(\"num_core_per_host\", 8, help=\"num TPU cores per host.\")\n\n flags.DEFINE_integer(\"seq_len\", 512,\n help=\"Sequence length.\")\n flags.DEFINE_integer(\"reuse_len\", 256,\n help=\"Number of token that can be reused as memory. \"\n \"Could be half of `seq_len`.\")\n flags.DEFINE_bool(\"uncased\", True, help=\"Use uncased inputs or not.\")\n flags.DEFINE_bool(\"bi_data\", True,\n help=\"whether to create bidirectional data\")\n flags.DEFINE_integer(\"mask_alpha\", default=6,\n help=\"How many tokens to form a group.\")\n flags.DEFINE_integer(\"mask_beta\", default=1,\n help=\"How many tokens to mask within each group.\")\n flags.DEFINE_bool(\"use_eod\", True,\n help=\"whether to append EOD at the end of a doc.\")\n flags.DEFINE_bool(\"from_raw_text\", True,\n help=\"Whether the input is raw text or encoded ids.\")\n flags.DEFINE_integer(\"num_predict\", default=85,\n help=\"Num of tokens to predict.\")\n\n flags.DEFINE_string(\"input_glob\", \"data/example/*.txt\",\n help=\"Input file glob.\")\n flags.DEFINE_string(\"sp_path\", \"\", help=\"Path to the sentence piece model.\")\n flags.DEFINE_string(\"save_dir\", \"proc_data/example\",\n help=\"Directory for saving the processed data.\")\n flags.DEFINE_enum(\"split\", \"train\", [\"train\", \"dev\", \"test\"],\n help=\"Save the data as which split.\")\n\n flags.DEFINE_integer(\"pass_id\", 0, help=\"ID of the current pass.\"\n \"Different passes sample different negative segment.\")\n flags.DEFINE_integer(\"num_task\", 1, help=\"Number of total tasks.\")\n flags.DEFINE_integer(\"task\", 0, help=\"The Task ID. This value is used when \"\n \"using multiple workers to identify each worker.\")\n\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n tf.compat.v1.app.run(create_data)\n"
] |
[
[
"tensorflow.concat",
"tensorflow.FixedLenFeature",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.equal",
"numpy.concatenate",
"tensorflow.where",
"tensorflow.random_shuffle",
"tensorflow.io.gfile.Exists",
"tensorflow.compat.v1.app.run",
"numpy.random.randint",
"tensorflow.train.Int64List",
"tensorflow.io.gfile.MakeDirs",
"tensorflow.boolean_mask",
"numpy.arange",
"tensorflow.data.TFRecordDataset",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.io.gfile.Open",
"tensorflow.parse_single_example",
"tensorflow.logical_not",
"tensorflow.keras.backend.is_sparse",
"numpy.logical_not",
"numpy.random.choice",
"tensorflow.shape",
"tensorflow.one_hot",
"tensorflow.train.FloatList",
"tensorflow.train.Features",
"tensorflow.io.gfile.Glob",
"numpy.array",
"numpy.flip",
"numpy.sum",
"tensorflow.sparse.to_dense",
"tensorflow.transpose",
"numpy.random.seed",
"tensorflow.range",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.logging.info",
"tensorflow.logical_and"
]
] |
jsleb333/paper-decision-trees-as-partitioning-machines
|
[
"0f354dac486a9845a9504419e31c84c7eb39507f",
"0f354dac486a9845a9504419e31c84c7eb39507f"
] |
[
"experiments/pruning.py",
"tests/test_utils/test_convert_tree.py"
] |
[
"from sklearn.model_selection import KFold\nfrom sklearn.metrics import zero_one_loss\nimport numpy as np\nfrom copy import copy\n\nfrom partitioning_machines import breiman_alpha_pruning_objective\n\n\ndef prune_with_bound(decision_tree, bound):\n \"\"\"\n Prunes the tree as described by Algorithm 3 in Appendix E of the paper.\n\n Only to the outside 'while' loop of the algorithm is implemented in here. The 'for' loop is implemented by the 'prune_tree' method of the decision tree classifier object (which is called here).\n \"\"\"\n leaf = decision_tree.tree\n while not leaf.is_leaf():\n leaf = leaf.left_subtree\n best_bound = bound(leaf)\n bounds_value = decision_tree.compute_pruning_coefficients(bound)\n\n while bounds_value and bounds_value[0] <= best_bound:\n best_bound = bounds_value[0]\n decision_tree.prune_tree(best_bound)\n bounds_value = decision_tree.compute_pruning_coefficients(bound)\n\n return best_bound\n\n\ndef prune_with_cv(\n decision_tree,\n X,\n y,\n n_folds=10,\n pruning_objective=breiman_alpha_pruning_objective,\n optimisation_mode='min'):\n \"\"\"\n Pruning using cross-validation. This is an abstraction of CART's cost-complexity pruning algorithm, where the objective can be whatever we want. The optimal pruning threshold is chosen as described by Breiman (1984).\n \"\"\"\n pruning_coefs = decision_tree.compute_pruning_coefficients(pruning_objective)\n\n CV_trees = [copy(decision_tree) for i in range(n_folds)]\n\n fold_idx = list(KFold(n_splits=n_folds).split(X))\n\n for fold, (tr_idx, ts_idx) in enumerate(fold_idx):\n X_tr, y_tr = X[tr_idx], y[tr_idx]\n CV_trees[fold].fit(X_tr, y_tr)\n\n n_errors = [0] * len(pruning_coefs)\n for k, threshold in enumerate(pruning_coefs):\n for tree, (tr_idx, ts_idx) in zip(CV_trees, fold_idx):\n tree.prune_tree(threshold, pruning_objective)\n X_ts, y_ts = X[ts_idx], y[ts_idx]\n y_pred = tree.predict(X_ts)\n n_errors[k] += zero_one_loss(y_true=y_ts, y_pred=y_pred, normalize=False)\n\n sign = 1 if optimisation_mode == 'min' else -1\n\n argmin_first = 0\n argmin_last = 0\n val_min = np.infty\n for i, errors in enumerate(n_errors):\n if sign*errors < sign*val_min:\n argmin_first = i\n argmin_last = i\n val_min = errors\n elif errors == val_min:\n argmin_last = i\n\n optimal_pruning_coef_threshold = (pruning_coefs[argmin_first] + pruning_coefs[argmin_last])/2\n decision_tree.prune_tree(optimal_pruning_coef_threshold)\n\n return optimal_pruning_coef_threshold\n",
"from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.datasets import load_iris\nimport numpy as np\n\nfrom partitioning_machines import Tree\nfrom partitioning_machines.utils import tree_from_sklearn_decision_tree\n\n\ndef test_tree_from_sklearn_decision_tree_with_actual_tree():\n X, y = load_iris(return_X_y=True)\n sklearn_tree = DecisionTreeClassifier()\n sklearn_tree = sklearn_tree.fit(X, y)\n \n tree_from_sklearn = tree_from_sklearn_decision_tree(sklearn_tree)\n\n "
] |
[
[
"sklearn.metrics.zero_one_loss",
"sklearn.model_selection.KFold"
],
[
"sklearn.tree.DecisionTreeClassifier",
"sklearn.datasets.load_iris"
]
] |
tomevans/pyARC
|
[
"935e3d9b49bca36433e4fb85a43c42b1d10d561d"
] |
[
"pyARC/examples/RunEmcee.py"
] |
[
"from pyATMO_dev import pyATMO\nfrom pyARC_dev import pyARC\nimport numpy as np\nimport PrepDataset, InstallNamelistParameters\nimport pdb\n\n\n\"\"\"\nThis module demonstrates how to run an emcee retrieval on the synthetic\ndataset generated by the PrepDataset module. ATMO parameters are imported\nfrom the InstallNamelistParameters module. Execution is:\n>> import RunEmcee\n>> RunEmcee.Main( nchains=1, nwalkers=100, nsteps=100, threads=3, ncorr_burn=0 )\n\"\"\"\n\n\nLOGG = InstallNamelistParameters.LOGG\nTEQ = InstallNamelistParameters.TEQ\nRPLANET = InstallNamelistParameters.RPLANET\nRSTAR = InstallNamelistParameters.RSTAR\nAAU = InstallNamelistParameters.AAU\nMDH = InstallNamelistParameters.MDH\nCORATIO = InstallNamelistParameters.CORATIO\n\n\nTRANSMISSIONMODEL = PrepDataset.TRANSMISSIONMODEL\nTRANSMISSIONDATA = PrepDataset.TRANSMISSIONDATA\n\n\ndef Main( nchains=1, nwalkers=100, nsteps=100, threads=1, ncorr_burn=0, nice=None ):\n\n # Initialise the ATMO and ARC objects:\n ATMO = pyATMO.ATMO()\n InstallNamelistParameters.Main( ATMO )\n if nice!=None:\n ATMO.nice = nice\n Retrieval = pyARC.ARC()\n Retrieval.ATMO = ATMO\n\n # Read in the data:\n Retrieval.TransmissionData = { 'G141':np.loadtxt( TRANSMISSIONDATA ) }\n\n # Specify the log likelihood for the retrieval:\n Retrieval.LogLikeFunc = pyARC.ModelLogLike.ClearChemEqTransmission\n\n # Specify the priors for the free parameters:\n Retrieval.Priors = { 'dRpRs':dRpRs_prior, 'Teff':Teff_prior, 'MdH':MdH_prior, \\\n 'COratio':COratio_prior }\n\n # Specify random functions to draw initial walker positions from:\n Retrieval.InitParSampleFuncs = { 'dRpRs':dRpRs_init, 'Teff':Teff_init, \\\n 'MdH':MdH_init, 'COratio':COratio_init }\n\n # Run the Emcee sampler:\n Retrieval.RunEmcee( nchains=nchains, nwalkers=nwalkers, nsteps=nsteps, \\\n threads=threads, ncorr_burn=ncorr_burn )\n\n return None\n\n\n# Priors for the free parameters:\ndef Uniform( y, l, u ):\n if ( y>=l )*( y<=u ):\n return 1./float( u-l )\n else:\n return -np.inf\ndef dRpRs_prior( y ):\n return Uniform( y, -0.1, 0.1 )\ndef Teff_prior( y ):\n return Uniform( y, 200., 4000. )\ndef MdH_prior( y ):\n return Uniform( y, -4, 4 )\ndef COratio_prior( y ):\n return Uniform( y, 0.1, 3.0 )\n\n\n# Functions to draw random starting points for\n# the emcee walkers:\ndef rfunc( nwalkers, l, u ):\n return l + (u-l)*np.random.random( nwalkers )\ndef dRpRs_init( nwalkers ):\n return rfunc( -0.00001, 0.00001 )\ndef Teff_init( nwalkers ):\n return rfunc( 200., 2000. )\ndef MdH_init( nwalkers ):\n return rfunc( -0.01, 0.01 )\ndef COratio_init( nwalkers ):\n return rfunc( 0.51, 0.61 )\n\n"
] |
[
[
"numpy.random.random",
"numpy.loadtxt"
]
] |
AZdet/causal-infogan
|
[
"146b647863a27542ad4a1a01ddb033cdcab9843d"
] |
[
"logger.py"
] |
[
"# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514\nimport tensorflow as tf\nimport numpy as np\nimport scipy.misc\n\ntry:\n from StringIO import StringIO # Python 2.7\nexcept ImportError:\n from io import BytesIO # Python 3.x\n\n\nclass Logger(object):\n\n def __init__(self, log_dir):\n \"\"\"Create a summary writer logging to log_dir.\"\"\"\n self.writer = tf.summary.FileWriter(log_dir)\n\n def scalar_summary(self, tag, value, step):\n \"\"\"Log a scalar variable.\"\"\"\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n self.writer.add_summary(summary, step)\n\n def image_summary(self, tag, images, step):\n \"\"\"Log a list of images.\"\"\"\n\n img_summaries = []\n for i, img in enumerate(images):\n # Write the image to a string\n try:\n s = StringIO()\n except:\n s = BytesIO()\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n height=img.shape[0],\n width=img.shape[1])\n # Create a Summary value\n img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))\n\n # Create and write Summary\n summary = tf.Summary(value=img_summaries)\n self.writer.add_summary(summary, step)\n\n def histo_summary(self, tag, values, step, bins=1000):\n \"\"\"Log a histogram of the tensor of values.\"\"\"\n\n # Create a histogram using numpy\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill the fields of the histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(values))\n hist.max = float(np.max(values))\n hist.num = int(np.prod(values.shape))\n hist.sum = float(np.sum(values))\n hist.sum_squares = float(np.sum(values ** 2))\n\n # Drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])\n self.writer.add_summary(summary, step)\n self.writer.flush()\n"
] |
[
[
"tensorflow.summary.FileWriter",
"numpy.min",
"numpy.max",
"tensorflow.Summary.Value",
"numpy.prod",
"tensorflow.HistogramProto",
"tensorflow.Summary",
"numpy.histogram",
"numpy.sum"
]
] |
vcarehuman/tf-pose-estimation-master
|
[
"443b2101449a41cdf69cc8f510b58bef8440b641"
] |
[
"src/estimator_orginal.py"
] |
[
"import itertools\nimport logging\nimport math\nimport pandas as pd\nfrom collections import namedtuple\nfrom datetime import datetime\nimport time\n\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom scipy.ndimage import maximum_filter, gaussian_filter\nfrom numpy import array\nimport matplotlib.pyplot as plt\nimport common\nfrom common import CocoPairsNetwork, CocoPairs, CocoPart\nimport xlwt\n\n\nlogger = logging.getLogger('TfPoseEstimator')\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler()\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\nclass Human:\n \"\"\"\n body_parts: list of BodyPart\n \"\"\"\n __slots__ = ('body_parts', 'pairs', 'uidx_list')\n\n def __init__(self, pairs):\n self.pairs = []\n self.uidx_list = set()\n self.body_parts = {}\n for pair in pairs:\n self.add_pair(pair)\n\n @staticmethod\n def _get_uidx(part_idx, idx):\n return '%d-%d' % (part_idx, idx)\n\n def add_pair(self, pair):\n self.pairs.append(pair)\n self.body_parts[pair.part_idx1] = BodyPart(Human._get_uidx(pair.part_idx1, pair.idx1),\n pair.part_idx1,\n pair.coord1[0], pair.coord1[1], pair.score)\n self.body_parts[pair.part_idx2] = BodyPart(Human._get_uidx(pair.part_idx2, pair.idx2),\n pair.part_idx2,\n pair.coord2[0], pair.coord2[1], pair.score)\n self.uidx_list.add(Human._get_uidx(pair.part_idx1, pair.idx1))\n self.uidx_list.add(Human._get_uidx(pair.part_idx2, pair.idx2))\n\n def is_connected(self, other):\n return len(self.uidx_list & other.uidx_list) > 0\n\n def merge(self, other):\n for pair in other.pairs:\n self.add_pair(pair)\n\n def part_count(self):\n return len(self.body_parts.keys())\n\n def get_max_score(self):\n return max([x.score for _, x in self.body_parts.items()])\n\n def __str__(self):\n return ' '.join([str(x) for x in self.body_parts.values()])\n\n\nclass BodyPart:\n \"\"\"\n part_idx : part index(eg. 0 for nose)\n x, y: coordinate of body part\n score : confidence score\n \"\"\"\n __slots__ = ('uidx', 'part_idx', 'x', 'y', 'score')\n\n def __init__(self, uidx, part_idx, x, y, score):\n self.uidx = uidx\n self.part_idx = part_idx\n self.x, self.y = x, y\n self.score = score\n\n def get_part_name(self):\n return CocoPart(self.part_idx)\n\n def __str__(self):\n return 'BodyPart:%d-(%.2f, %.2f) score=%.2f' % (self.part_idx, self.x, self.y, self.score)\n\n\nclass PoseEstimator:\n heatmap_supress = False\n heatmap_gaussian = False\n adaptive_threshold = False\n\n NMS_Threshold = 0.15\n Local_PAF_Threshold = 0.2\n PAF_Count_Threshold = 5\n Part_Count_Threshold = 4\n Part_Score_Threshold = 4.5\n\n PartPair = namedtuple('PartPair', [\n 'score',\n 'part_idx1', 'part_idx2',\n 'idx1', 'idx2',\n 'coord1', 'coord2',\n 'score1', 'score2'\n ])\n\n def __init__(self):\n pass\n\n @staticmethod\n def non_max_suppression(plain, window_size=3, threshold=NMS_Threshold):\n under_threshold_indices = plain < threshold\n plain[under_threshold_indices] = 0\n return plain * (plain == maximum_filter(plain, footprint=np.ones((window_size, window_size))))\n\n @staticmethod\n def estimate(heat_mat, paf_mat):\n if heat_mat.shape[2] == 19:\n heat_mat = np.rollaxis(heat_mat, 2, 0)\n if paf_mat.shape[2] == 38:\n paf_mat = np.rollaxis(paf_mat, 2, 0)\n\n if PoseEstimator.heatmap_supress:\n heat_mat = heat_mat - heat_mat.min(axis=1).min(axis=1).reshape(19, 1, 1)\n heat_mat = heat_mat - heat_mat.min(axis=2).reshape(19, heat_mat.shape[1], 1)\n\n if PoseEstimator.heatmap_gaussian:\n heat_mat = gaussian_filter(heat_mat, sigma=0.5)\n\n if PoseEstimator.adaptive_threshold:\n _NMS_Threshold = max(np.average(heat_mat) * 4.0, PoseEstimator.NMS_Threshold)\n _NMS_Threshold = min(_NMS_Threshold, 0.3)\n else:\n _NMS_Threshold = PoseEstimator.NMS_Threshold\n\n # extract interesting coordinates using NMS.\n coords = [] # [[coords in plane1], [....], ...]\n for plain in heat_mat[:-1]:\n nms = PoseEstimator.non_max_suppression(plain, 5, _NMS_Threshold)\n coords.append(np.where(nms >= _NMS_Threshold))\n\n # score pairs\n pairs_by_conn = list()\n for (part_idx1, part_idx2), (paf_x_idx, paf_y_idx) in zip(CocoPairs, CocoPairsNetwork):\n pairs = PoseEstimator.score_pairs(\n part_idx1, part_idx2,\n coords[part_idx1], coords[part_idx2],\n paf_mat[paf_x_idx], paf_mat[paf_y_idx],\n heatmap=heat_mat,\n rescale=(1.0 / heat_mat.shape[2], 1.0 / heat_mat.shape[1])\n )\n\n pairs_by_conn.extend(pairs)\n\n # merge pairs to human\n # pairs_by_conn is sorted by CocoPairs(part importance) and Score between Parts.\n humans = [Human([pair]) for pair in pairs_by_conn]\n while True:\n merge_items = None\n for k1, k2 in itertools.combinations(humans, 2):\n if k1 == k2:\n continue\n if k1.is_connected(k2):\n merge_items = (k1, k2)\n break\n\n if merge_items is not None:\n merge_items[0].merge(merge_items[1])\n humans.remove(merge_items[1])\n else:\n break\n\n # reject by subset count\n humans = [human for human in humans if human.part_count() >= PoseEstimator.PAF_Count_Threshold]\n\n # reject by subset max score\n humans = [human for human in humans if human.get_max_score() >= PoseEstimator.Part_Score_Threshold]\n\n return humans\n\n @staticmethod\n def score_pairs(part_idx1, part_idx2, coord_list1, coord_list2, paf_mat_x, paf_mat_y, heatmap, rescale=(1.0, 1.0)):\n connection_temp = []\n\n cnt = 0\n for idx1, (y1, x1) in enumerate(zip(coord_list1[0], coord_list1[1])):\n for idx2, (y2, x2) in enumerate(zip(coord_list2[0], coord_list2[1])):\n score, count = PoseEstimator.get_score(x1, y1, x2, y2, paf_mat_x, paf_mat_y)\n cnt += 1\n if count < PoseEstimator.PAF_Count_Threshold or score <= 0.0:\n continue\n connection_temp.append(PoseEstimator.PartPair(\n score=score,\n part_idx1=part_idx1, part_idx2=part_idx2,\n idx1=idx1, idx2=idx2,\n coord1=(x1 * rescale[0], y1 * rescale[1]),\n coord2=(x2 * rescale[0], y2 * rescale[1]),\n score1=heatmap[part_idx1][y1][x1],\n score2=heatmap[part_idx2][y2][x2],\n ))\n\n connection = []\n used_idx1, used_idx2 = set(), set()\n for candidate in sorted(connection_temp, key=lambda x: x.score, reverse=True):\n # check not connected\n if candidate.idx1 in used_idx1 or candidate.idx2 in used_idx2:\n continue\n connection.append(candidate)\n used_idx1.add(candidate.idx1)\n used_idx2.add(candidate.idx2)\n\n return connection\n\n @staticmethod\n def get_score(x1, y1, x2, y2, paf_mat_x, paf_mat_y):\n __num_inter = 10\n __num_inter_f = float(__num_inter)\n dx, dy = x2 - x1, y2 - y1\n normVec = math.sqrt(dx ** 2 + dy ** 2)\n\n if normVec < 1e-4:\n return 0.0, 0\n\n vx, vy = dx / normVec, dy / normVec\n\n xs = np.arange(x1, x2, dx / __num_inter_f) if x1 != x2 else np.full((__num_inter,), x1)\n ys = np.arange(y1, y2, dy / __num_inter_f) if y1 != y2 else np.full((__num_inter,), y1)\n xs = (xs + 0.5).astype(np.int8)\n ys = (ys + 0.5).astype(np.int8)\n\n # without vectorization\n pafXs = np.zeros(__num_inter)\n pafYs = np.zeros(__num_inter)\n for idx, (mx, my) in enumerate(zip(xs, ys)):\n pafXs[idx] = paf_mat_x[my][mx]\n pafYs[idx] = paf_mat_y[my][mx]\n\n # vectorization slow?\n # pafXs = pafMatX[ys, xs]\n # pafYs = pafMatY[ys, xs]\n\n local_scores = pafXs * vx + pafYs * vy\n thidxs = local_scores > PoseEstimator.Local_PAF_Threshold\n\n return sum(local_scores * thidxs), sum(thidxs)\n\n\nclass TfPoseEstimator:\n\n \n ENSEMBLE = 'addup' # average, addup\n \n book = xlwt.Workbook()\n sheet1 = book.add_sheet(\"Sheet1\") \n \n num = 1\n \n \n row = sheet1.row(num)\n row.write(0, \"Time\")\n row.write(1, \"Shoulder X\")\n row.write(2, \"Shoulder Y\")\n row.write(3, \"Elbow X\")\n row.write(4, \"Elbow Y\")\n row.write(5, \"Wrist X\")\n row.write(6, \"Wrist Y\")\n row.write(7, \"Angle\")\n \n\n def __init__(self, graph_path, target_size=(320, 240)):\n self.target_size = target_size\n\n # load graph\n with tf.gfile.GFile(graph_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n self.graph = tf.get_default_graph()\n tf.import_graph_def(graph_def, name='TfPoseEstimator')\n self.persistent_sess = tf.Session(graph=self.graph)\n\n # for op in self.graph.get_operations():\n # print(op.name)\n\n self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')\n self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')\n\n self.heatMat = self.pafMat = None\n\n # warm-up\n self.persistent_sess.run(\n self.tensor_output,\n feed_dict={\n self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)]\n }\n )\n\n def __del__(self):\n self.persistent_sess.close()\n\n @staticmethod\n def _quantize_img(npimg):\n npimg_q = npimg + 1.0\n npimg_q /= (2.0 / 2**8)\n # npimg_q += 0.5\n npimg_q = npimg_q.astype(np.uint8)\n return npimg_q\n \n def py_ang(v1, v2):\n \"\"\" Returns the angle in radians between vectors 'v1' and 'v2' \"\"\"\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)\n \n \n \n @staticmethod\n def draw_humans(npimg, humans, imgcopy=False):\n \n if imgcopy:\n npimg = np.copy(npimg)\n image_h, image_w = npimg.shape[:2]\n centers = {}\n TfPoseEstimator.num +=1\n TfPoseEstimator.row = TfPoseEstimator.sheet1.row(TfPoseEstimator.num)\n \n \n for human in humans:\n a=0\n b=0\n c=0\n \n \n \n \n \n \n # draw point\n for i in range(common.CocoPart.Background.value):\n \n \n \n if i not in human.body_parts.keys():\n continue\n body_part = human.body_parts[i]\n \n if (str(body_part.get_part_name()).strip() not in (\"CocoPart.RShoulder\", \"CocoPart.RElbow\",\"CocoPart.RWrist\")):\n continue\n \n \n center = (int(body_part.x * image_w + 0.5), int(body_part.y * image_h + 0.5))\n centers[i] = center\n cv2.circle(npimg, center, 3, common.CocoColors[i], thickness=3, lineType=8, shift=0)\n \n# cv2.putText(npimg, center, center, 4, 2, 255, 3, 8)\n \n \n if (str(body_part.get_part_name()).strip()==\"CocoPart.RShoulder\"):\n a = np.array([int(body_part.x * image_w + 0.5), int(body_part.y * image_h + 0.5)])\n \n if (str(body_part.get_part_name()).strip()==\"CocoPart.RElbow\"):\n b = np.array([int(body_part.x * image_w + 0.5), int(body_part.y * image_h + 0.5)])\n \n if (str(body_part.get_part_name()).strip()==\"CocoPart.RWrist\"):\n c = np.array([int(body_part.x * image_w + 0.5), int(body_part.y * image_h + 0.5)])\n\n cv2.putText(npimg,\n str(center) ,\n (int(body_part.x * image_w + 0.5) , int(body_part.y * image_h + 0.5)), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (0, 255, 0), 2)\n \n print(center)\n \n try:\n TfPoseEstimator.row.write(0, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-4])\n \n except:\n print(\"time not printed\") \n \n try:\n if (str(body_part.get_part_name()).strip()==\"CocoPart.RShoulder\"):\n TfPoseEstimator.row.write(1, int(body_part.x * image_w + 0.5))\n TfPoseEstimator.row.write(2, int(body_part.y * image_h + 0.5))\n \n if (str(body_part.get_part_name()).strip()==\"CocoPart.RElbow\"):\n TfPoseEstimator.row.write(3, int(body_part.x * image_w + 0.5))\n TfPoseEstimator.row.write(4, int(body_part.y * image_h + 0.5))\n \n if (str(body_part.get_part_name()).strip()==\"CocoPart.RWrist\"):\n TfPoseEstimator.row.write(5, int(body_part.x * image_w + 0.5))\n TfPoseEstimator.row.write(6, int(body_part.y * image_h + 0.5))\n except:\n print(\"time not printed\") \n \n \n print(\"keys = \" + str(human.body_parts.keys()))\n \n \n # draw line\n for pair_order, pair in enumerate(common.CocoPairsRender):\n if pair[0] not in human.body_parts.keys() or pair[1] not in human.body_parts.keys():\n continue\n ba = a - b\n bc = c - b\n\n cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))\n angle = np.arccos(cosine_angle)\n\n degree = np.degrees(angle)\n \n try:\n TfPoseEstimator.row.write(7, str(round(degree))) \n except:\n \n TfPoseEstimator.book.save(\"test.xls\") \n \n \n print(\"elbow angle =\" + str(degree))\n \n \n \n \n \n \n cv2.putText(npimg,\n \"Elbow angle =\" + str(degree),\n (10 , 35), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 0, 0), 2)\n \n \n if (str(body_part.get_part_name()).strip() not in (\"CocoPart.RHip\",\"CocoPart.RShoulder\",\"CocoPart.RElbow\")):\n continue\n \n #npimg = cv2.line(npimg, centers[pair[0]], centers[pair[1]], common.CocoColors[pair_order], 3)\n \n \n return npimg\n \n \n\n def _get_scaled_img(self, npimg, scale):\n get_base_scale = lambda s, w, h: max(self.target_size[0] / float(w), self.target_size[1] / float(h)) * s\n img_h, img_w = npimg.shape[:2]\n\n if scale is None:\n if npimg.shape[:2] != (self.target_size[1], self.target_size[0]):\n # resize\n npimg = cv2.resize(npimg, self.target_size)\n return [npimg], [(0.0, 0.0, 1.0, 1.0)]\n elif isinstance(scale, float):\n # scaling with center crop\n base_scale = get_base_scale(scale, img_w, img_h)\n npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale)\n ratio_x = (1. - self.target_size[0] / float(npimg.shape[1])) / 2.0\n ratio_y = (1. - self.target_size[1] / float(npimg.shape[0])) / 2.0\n roi = self._crop_roi(npimg, ratio_x, ratio_y)\n return [roi], [(ratio_x, ratio_y, 1.-ratio_x*2, 1.-ratio_y*2)]\n elif isinstance(scale, tuple) and len(scale) == 2:\n # scaling with sliding window : (scale, step)\n base_scale = get_base_scale(scale[0], img_w, img_h)\n base_scale_w = self.target_size[0] / (img_w * base_scale)\n base_scale_h = self.target_size[1] / (img_h * base_scale)\n npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale)\n window_step = scale[1]\n rois = []\n infos = []\n for ratio_x, ratio_y in itertools.product(np.arange(0., 1.01 - base_scale_w, window_step),\n np.arange(0., 1.01 - base_scale_h, window_step)):\n roi = self._crop_roi(npimg, ratio_x, ratio_y)\n rois.append(roi)\n infos.append((ratio_x, ratio_y, base_scale_w, base_scale_h))\n return rois, infos\n elif isinstance(scale, tuple) and len(scale) == 3:\n # scaling with ROI : (want_x, want_y, scale_ratio)\n base_scale = get_base_scale(scale[2], img_w, img_h)\n npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale)\n ratio_w = self.target_size[0] / float(npimg.shape[1])\n ratio_h = self.target_size[1] / float(npimg.shape[0])\n\n want_x, want_y = scale[:2]\n ratio_x = want_x - ratio_w / 2.\n ratio_y = want_y - ratio_h / 2.\n ratio_x = max(ratio_x, 0.0)\n ratio_y = max(ratio_y, 0.0)\n if ratio_x + ratio_w > 1.0:\n ratio_x = 1. - ratio_w\n if ratio_y + ratio_h > 1.0:\n ratio_y = 1. - ratio_h\n\n roi = self._crop_roi(npimg, ratio_x, ratio_y)\n return [roi], [(ratio_x, ratio_y, ratio_w, ratio_h)]\n\n def _crop_roi(self, npimg, ratio_x, ratio_y):\n target_w, target_h = self.target_size\n h, w = npimg.shape[:2]\n x = max(int(w*ratio_x-.5), 0)\n y = max(int(h*ratio_y-.5), 0)\n cropped = npimg[y:y+target_h, x:x+target_w]\n\n cropped_h, cropped_w = cropped.shape[:2]\n if cropped_w < target_w or cropped_h < target_h:\n npblank = np.zeros((self.target_size[1], self.target_size[0], 3), dtype=np.uint8)\n\n copy_x, copy_y = (target_w - cropped_w) // 2, (target_h - cropped_h) // 2\n npblank[copy_y:copy_y+cropped_h, copy_x:copy_x+cropped_w] = cropped\n else:\n return cropped\n\n def inference(self, npimg, scales=None):\n if npimg is None:\n raise Exception('The image is not valid. Please check your image exists.')\n\n if not isinstance(scales, list):\n scales = [None]\n\n if self.tensor_image.dtype == tf.quint8:\n # quantize input image\n npimg = TfPoseEstimator._quantize_img(npimg)\n pass\n\n rois = []\n infos = []\n for scale in scales:\n roi, info = self._get_scaled_img(npimg, scale)\n # for dubug...\n # print(roi[0].shape)\n # cv2.imshow('a', roi[0])\n # cv2.waitKey()\n rois.extend(roi)\n infos.extend(info)\n\n logger.debug('inference+')\n output = self.persistent_sess.run(self.tensor_output, feed_dict={self.tensor_image: rois})\n heatMats = output[:, :, :, :19]\n pafMats = output[:, :, :, 19:]\n logger.debug('inference-')\n\n output_h, output_w = output.shape[1:3]\n max_ratio_w = max_ratio_h = 10000.0\n for info in infos:\n max_ratio_w = min(max_ratio_w, info[2])\n max_ratio_h = min(max_ratio_h, info[3])\n mat_w, mat_h = int(output_w/max_ratio_w), int(output_h/max_ratio_h)\n resized_heatMat = np.zeros((mat_h, mat_w, 19), dtype=np.float32)\n resized_pafMat = np.zeros((mat_h, mat_w, 38), dtype=np.float32)\n resized_cntMat = np.zeros((mat_h, mat_w, 1), dtype=np.float32)\n resized_cntMat += 1e-12\n\n for heatMat, pafMat, info in zip(heatMats, pafMats, infos):\n w, h = int(info[2]*mat_w), int(info[3]*mat_h)\n heatMat = cv2.resize(heatMat, (w, h))\n pafMat = cv2.resize(pafMat, (w, h))\n x, y = int(info[0] * mat_w), int(info[1] * mat_h)\n\n if TfPoseEstimator.ENSEMBLE == 'average':\n # average\n resized_heatMat[max(0, y):y + h, max(0, x):x + w, :] += heatMat[max(0, -y):, max(0, -x):, :]\n resized_pafMat[max(0,y):y+h, max(0, x):x+w, :] += pafMat[max(0, -y):, max(0, -x):, :]\n resized_cntMat[max(0,y):y+h, max(0, x):x+w, :] += 1\n else:\n # add up\n resized_heatMat[max(0, y):y + h, max(0, x):x + w, :] = np.maximum(resized_heatMat[max(0, y):y + h, max(0, x):x + w, :], heatMat[max(0, -y):, max(0, -x):, :])\n resized_pafMat[max(0,y):y+h, max(0, x):x+w, :] += pafMat[max(0, -y):, max(0, -x):, :]\n resized_cntMat[max(0, y):y + h, max(0, x):x + w, :] += 1\n\n if TfPoseEstimator.ENSEMBLE == 'average':\n self.heatMat = resized_heatMat / resized_cntMat\n self.pafMat = resized_pafMat / resized_cntMat\n else:\n self.heatMat = resized_heatMat\n self.pafMat = resized_pafMat / (np.log(resized_cntMat) + 1)\n\n humans = PoseEstimator.estimate(self.heatMat, self.pafMat)\n return humans\n"
] |
[
[
"numpy.rollaxis",
"numpy.dot",
"tensorflow.gfile.GFile",
"numpy.ndarray",
"numpy.arctan2",
"numpy.cross",
"tensorflow.get_default_graph",
"numpy.where",
"tensorflow.import_graph_def",
"numpy.arange",
"numpy.full",
"numpy.copy",
"tensorflow.Session",
"numpy.zeros",
"numpy.log",
"numpy.arccos",
"scipy.ndimage.gaussian_filter",
"numpy.degrees",
"numpy.linalg.norm",
"numpy.ones",
"tensorflow.GraphDef",
"numpy.average"
]
] |
kylegenova/nerflet
|
[
"087ed7377c0552837c8ba0e4bead9334992283e9"
] |
[
"run_nerf_helpers.py"
] |
[
"import os\nimport sys\nimport tensorflow as tf\nimport numpy as np\nimport imageio\nimport json\n\n\n# Misc utils\n\ndef img2mse(x, y): return tf.reduce_mean(tf.square(x - y))\n\n\ndef mse2psnr(x): return -10.*tf.log(x)/tf.log(10.)\n\n\ndef to8b(x): return (255*np.clip(x, 0, 1)).astype(np.uint8)\n\n\n# Positional encoding\n\nclass Embedder:\n\n    def __init__(self, **kwargs):\n\n        self.kwargs = kwargs\n        self.create_embedding_fn()\n\n    def create_embedding_fn(self):\n\n        embed_fns = []\n        d = self.kwargs['input_dims']\n        out_dim = 0\n        if self.kwargs['include_input']:\n            embed_fns.append(lambda x: x)\n            out_dim += d\n\n        max_freq = self.kwargs['max_freq_log2']\n        N_freqs = self.kwargs['num_freqs']\n\n        if self.kwargs['log_sampling']:\n            freq_bands = 2.**tf.linspace(0., max_freq, N_freqs)\n        else:\n            freq_bands = tf.linspace(2.**0., 2.**max_freq, N_freqs)\n\n        for freq in freq_bands:\n            for p_fn in self.kwargs['periodic_fns']:\n                embed_fns.append(lambda x, p_fn=p_fn,\n                                 freq=freq: p_fn(x * freq))\n                out_dim += d\n\n        self.embed_fns = embed_fns\n        self.out_dim = out_dim\n\n    def embed(self, inputs):\n        return tf.concat([fn(inputs) for fn in self.embed_fns], -1)\n\n\ndef get_embedder(multires, i=0):\n\n    if i == -1:\n        return tf.identity, 3\n\n    embed_kwargs = {\n        'include_input': True,\n        'input_dims': 3,\n        'max_freq_log2': multires-1,\n        'num_freqs': multires,\n        'log_sampling': True,\n        'periodic_fns': [tf.math.sin, tf.math.cos],\n    }\n\n    embedder_obj = Embedder(**embed_kwargs)\n    def embed(x, eo=embedder_obj): return eo.embed(x)\n    return embed, embedder_obj.out_dim\n\n####################### Begin kgenova code #####################################\n\n# Model architecture\ndef roll_pitch_yaw_to_rotation_matrices(roll_pitch_yaw):\n    \"\"\"Converts roll-pitch-yaw angles to rotation matrices.\n    Args:\n      roll_pitch_yaw: Tensor (or convertible value) with shape [..., 3]. The last\n        dimension contains the roll, pitch, and yaw angles in radians.  The\n        resulting matrix rotates points by first applying roll around the x-axis,\n        then pitch around the y-axis, then yaw around the z-axis.\n    Returns:\n       Tensor with shape [..., 3, 3]. The 3x3 rotation matrices corresponding to\n       the input roll-pitch-yaw angles.\n    \"\"\"\n    roll_pitch_yaw = tf.convert_to_tensor(roll_pitch_yaw)\n\n    cosines = tf.cos(roll_pitch_yaw)\n    sines = tf.sin(roll_pitch_yaw)\n    cx, cy, cz = tf.unstack(cosines, axis=-1)\n    sx, sy, sz = tf.unstack(sines, axis=-1)\n    # pyformat: disable\n    rotation = tf.stack(\n        [cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx,\n         sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx,\n         -sy, cy * sx, cy * cx], axis=-1)\n    # pyformat: enable\n    shape = tf.concat([tf.shape(rotation)[:-1], [3, 3]], axis=0)\n    rotation = tf.reshape(rotation, shape)\n    return rotation\n\n\ndef decode_covariance_roll_pitch_yaw(radii, rotations, invert=False):\n    \"\"\"Converts 6-D radus vectors to the corresponding covariance matrices.\n    Args:\n      radii: Tensor with shape [EC, 3]. Covariances of the three Gaussian axes. \n      rotations: Tensor with shape [EC, 3]. The roll-pitch-yaw rotation angles\n        of the Gaussian frame.\n      invert: Whether to return the inverse covariance.\n    Returns:\n      Tensor with shape [..., 3, 3]. The 3x3 (optionally inverted) covariance\n      matrices corresponding to the input radius vectors.\n    \"\"\"\n    DIV_EPSILON=1e-8\n    d = 1.0 / (radii + DIV_EPSILON) if invert else radii\n    diag = tf.matrix_diag(d)\n    rotation = roll_pitch_yaw_to_rotation_matrices(rotations)\n    return tf.matmul(tf.matmul(rotation, diag), rotation, transpose_b=True)\n\n\ndef eval_rbf(samples, centers, radii, rotations):\n    \"\"\"Samples gaussian radial basis functions at specified coordinates.\n    Args:\n      samples: Tensor with shape [N, 3], where N is the number of samples to evaluate.\n      centers: Tensor with shape [EC, 3]. Contains the [x,y,z] coordinates of the\n        RBF centers.\n      radii: Tensor with shape [EC, 3]. First three numbers are covariances of\n        the three Gaussian axes. \n      rotations: the roll-pitch-yaw rotation angles of the Gaussian frame.\n    Returns:\n      Tensor with shape [EC, N, 1]. The basis function strength at each sample.\n      TODO(kgenova) maybe switch to [N, EC].\n      location.\n    \"\"\"\n    with tf.name_scope('sample_cov_bf'):\n        assert len(samples.shape) == 2\n        samples = tf.expand_dims(samples, axis=0)  # Now shape is [1, N, 3]\n        \n        # Compute the samples' offset from center, then extract the coordinates.\n        diff = samples - tf.expand_dims(centers, axis=-2)  # broadcast to [1, n, 3] - [ec, 1, 3] -> [ec, n, 3]\n        x, y, z = tf.unstack(diff, axis=-1)\n        # Decode 6D radius vectors into inverse covariance matrices, then extract\n        # unique elements.\n        inv_cov = decode_covariance_roll_pitch_yaw(radii, rotations, invert=True)\n        shape = tf.concat([tf.shape(inv_cov)[:-2], [1, 9]], axis=0)\n        inv_cov = tf.reshape(inv_cov, shape)\n        c00, c01, c02, _, c11, c12, _, _, c22 = tf.unstack(inv_cov, axis=-1)\n        # Compute function value.\n        dist = (\n            x * (c00 * x + c01 * y + c02 * z) + y * (c01 * x + c11 * y + c12 * z) +\n            z * (c02 * x + c12 * y + c22 * z))\n        dist = tf.expand_dims(tf.exp(-0.5 * dist), axis=-1)\n        return dist\n\n# Not used in the final version; a different set of blending equations:\ndef sigmoid_rbf(samples, centers, radii, sharpness):\n    with tf.name_scope('sigmoid_rbf'):\n        #print('In sigmoid rbf')\n        assert len(samples.shape) == 2\n        samples = tf.expand_dims(samples, axis=0)  # now [1, N, 3]\n\n        diff = samples - tf.expand_dims(centers, axis=-2)  # [now ec, n, 3]\n        distance = tf.sqrt(tf.reduce_sum(diff * diff, axis=2, keepdims=True))  # [ec, n, 1]\n        \n        assert len(radii.shape) == 2\n        assert radii.shape[1] == 1  # radii = the distance at which contribution is 0.5\n        shifted = distance - tf.expand_dims(radii, axis=-1)  # [ec, n, 1]\n\n        assert len(sharpness.shape) == 2\n        scaled = tf.expand_dims(sharpness, axis=1) * shifted\n        return tf.keras.activations.sigmoid(-1.0 * scaled)  # multiply by -1 so that it's large close to 0 (the center)\n\n\nclass StackedFCLayer(tf.keras.layers.Layer):\n    def __init__(self, n_duplicates, output_width, activation=None):\n        super(StackedFCLayer, self).__init__()\n        self.output_width = output_width\n        self.n_duplicates = n_duplicates\n        self.activation = activation\n\n    def build(self, input_shape):\n        input_width = input_shape[-1]\n        initializer = tf.keras.initializers.glorot_uniform()\n        w_inits = np.stack([initializer(shape=(input_width, self.output_width), dtype='float32') for _ in range(self.n_duplicates)])\n        self.w = tf.Variable(initial_value=w_inits, trainable=True, name='BatchWeight', dtype=tf.float32)\n        self.b = self.add_weight(shape=(self.n_duplicates, 1, self.output_width), initializer='zeros', trainable=True, name='BatchBias', dtype=tf.float32)\n\n\n    def call(self, inputs):\n        assert len(inputs.shape) == 3\n        assert 
inputs.shape[0] == self.n_duplicates\n outputs = tf.matmul(inputs, self.w) + self.b\n if self.activation is not None:\n outputs = self.activation(outputs)\n return outputs\n\n\nclass RBFLayer(tf.keras.layers.Layer):\n def __init__(self, n_elts, is_fine):\n super(RBFLayer, self).__init__()\n self.n_elts = n_elts\n self.is_fine = is_fine\n\n def build(self, input_shape):\n self.constants = self.add_weight(name='constants', shape=(self.n_elts, 1),\n initializer=tf.keras.initializers.RandomUniform(5.0, 5.1),\n trainable=True)\n self.centers = self.add_weight(name='centers', shape=(self.n_elts, 3),\n initializer=tf.keras.initializers.RandomUniform(-0.6, 0.6),\n trainable=True)\n self.radii = self.add_weight(name='radii', shape=(self.n_elts, 3),\n initializer=tf.keras.initializers.RandomUniform(.05, 0.06),\n trainable=True)\n self.rotations = self.add_weight(name='rotations', shape=(self.n_elts, 3),\n initializer=tf.keras.initializers.RandomUniform(-1.0 * np.pi, np.pi),\n trainable=True)\n # Not used in the final version:\n initial_epsilon = np.array([0.01], dtype=np.float32)\n self.learned_epsilon = tf.Variable(name='epsilon',initial_value=initial_epsilon,trainable=True, dtype='float32')\n \n self.call_count = 0\n\n def call(self, world_space_points, nerflet_activations, dists):\n constants = tf.abs(self.constants)\n centers = self.centers\n radii = tf.abs(self.radii) + 0.005\n rotations = self.rotations\n \n # Inputs are NeRF outputs to be blended:\n rbfs = eval_rbf(world_space_points, centers, radii, rotations)\n # Alternative RBF function:\n # rbfs = sigmoid_rbf(world_space_points, centers, radii, constants)\n constants = tf.expand_dims(constants, axis=1)\n thresh = None #0.01 \n if thresh is not None:\n rbfs = tf.where(rbfs > thresh, rbfs, tf.zeros_like(rbfs))\n to_mask = None \n if to_mask is not None and self.is_fine:\n inds = tf.constant([0] * to_mask + [1] + [0] * (self.n_elts - 1 - to_mask), dtype=tf.float32)\n inds = tf.reshape(inds, [self.n_elts, 1, 1])\n constants = constants * inds\n rbfs = rbfs * constants\n\n # Apply an l1 penalty so that blobs don't overlap much in influence:\n no_penalty_threshold = 0.01\n penalty = 0.001 * tf.reduce_mean(tf.reduce_sum(tf.maximum(rbfs - no_penalty_threshold, 0), axis=0))\n\n alt_weight = 0.1 \n\n bbox_min = tf.constant([[-0.6, -0.6, -0.35]], dtype=tf.float32)\n bbox_max = tf.constant([[0.6, 0.6, 0.6]], dtype=tf.float32)\n centers_above_max = tf.reduce_sum(tf.maximum(centers - bbox_max, 0))\n centers_below_max = tf.reduce_sum(tf.maximum(bbox_min - centers, 0))\n penalty = penalty + 1.0 * (centers_above_max + centers_below_max)\n\n assert len(rbfs.shape) == 3\n\n if isinstance(nerflet_activations, list):\n nerflet_activations = tf.stack(nerflet_activations)\n\n\n # Map to RGBa:\n alpha = 1.0 - tf.exp(-tf.nn.relu(nerflet_activations[..., -1:]) * tf.expand_dims(dists, axis=0))\n rgb = tf.math.sigmoid(nerflet_activations[..., :3])\n nerflet_activations = tf.concat([rgb, alpha], axis=-1)\n\n\n n_to_print = 5\n\n \n rbf_sums = tf.reduce_sum(rbfs, axis=0, keepdims=True) # [1, N, 1]\n\n kill_thresh = 0.0 # 0.00001 for inference\n to_kill = rbfs < kill_thresh\n dummy_outputs = tf.reshape(tf.constant([0, 0, 0, 0], dtype=tf.float32), [1, 1, 4])\n outputs = tf.where_v2(to_kill,\n dummy_outputs, \n rbfs / (rbf_sums + 1e-6) * nerflet_activations)\n outputs = tf.reduce_sum(outputs, axis=0)\n\n to_not_kill = tf.cast(rbfs >= kill_thresh, dtype=tf.float32)\n total_nontrivial = tf.reduce_sum(to_not_kill)\n\n if self.call_count % 100 == 0:\n tf.print('First points: 
', world_space_points[:n_to_print, :])\n tf.print('First rbf values: ', rbfs[:, :n_to_print, :])\n tf.print('First nerflet activations: ', nerflet_activations[:, :n_to_print, :])\n tf.print('First final results: ', outputs[:n_to_print, :])\n tf.print('Average RBF sum: ', tf.reduce_mean(rbf_sums))\n tf.print('Average (multiplied) rbf weight: ', tf.reduce_mean(rbfs))\n # Compute the average number of nontrivial weights per nerf (and the mean per cell):\n for thresh in [0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]:\n rbf_nontrivial = tf.cast(rbfs > thresh, dtype=tf.float32)\n n_nontrivial = tf.reduce_mean(tf.reduce_sum(rbf_nontrivial, axis=0))\n tf.print(f'Average number of nontrivial rbfs per point at thresh {thresh}: ', n_nontrivial)\n n_nontrivial = tf.reshape(tf.reduce_sum(rbf_nontrivial, axis=1), [-1])\n tf.print(f'Number of nontrivial points per rbf at thresh {thresh}: ', n_nontrivial)\n print(f'Shape of nontrivial points per rbf: {n_nontrivial.shape}')\n frac_sums_nontrivial = tf.reduce_mean(tf.cast(rbf_sums > thresh, dtype=tf.float32))\n tf.print(f'Fraction of RBF sums that are nontrivial at thresh {thresh}: ', frac_sums_nontrivial)\n\n self.call_count += 1\n return outputs, penalty # total_nontrivial for statistics; change sum to mean\n\n\n################## End kgenova code (except minor changes) ############################################################\n\ndef init_nerf_model(D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, skips=[4], use_viewdirs=False,\n n_elts=None, is_fine=None):\n assert is_fine is not None\n relu = tf.keras.layers.ReLU()\n def dense(W, act=relu): return tf.keras.layers.Dense(W, activation=act)\n def stacked_dense(W, act=relu): return StackedFCLayer(n_elts, W, activation=act)\n\n print('MODEL', input_ch, input_ch_views, type(\n input_ch), type(input_ch_views), use_viewdirs)\n input_ch = int(input_ch)\n input_ch_views = int(input_ch_views)\n\n assert output_ch == 4\n assert use_viewdirs\n assert n_elts is not None\n\n inputs = tf.keras.Input(shape=(input_ch + input_ch_views + 3 + 1))\n inputs_pts, inputs_views, input_unembedded_pts, dists = tf.split(inputs, [input_ch, input_ch_views, 3, 1], -1)\n inputs_pts.set_shape([None, input_ch])\n inputs_views.set_shape([None, input_ch_views])\n input_unembedded_pts.set_shape([None, 3])\n\n stacked_inputs_pts = tf.tile(tf.expand_dims(inputs_pts, 0), [n_elts, 1, 1])\n stacked_inputs_views = tf.tile(tf.expand_dims(inputs_views, 0), [n_elts, 1, 1])\n stacked_input_unembedded_pts = tf.tile(tf.expand_dims(input_unembedded_pts, 0), [n_elts, 1, 1])\n\n outputs = stacked_inputs_pts\n for i in range(D):\n outputs = stacked_dense(W)(outputs)\n if i in skips:\n outputs = tf.concat([stacked_inputs_pts, outputs], -1)\n if use_viewdirs:\n alpha_out = stacked_dense(1, act=None)(outputs)\n bottleneck = stacked_dense(W, act=None)(outputs)\n inputs_viewdirs = tf.concat(\n [bottleneck, stacked_inputs_views], -1)\n outputs = inputs_viewdirs\n for i in range(1):\n outputs = stacked_dense(W//2)(outputs)\n outputs = stacked_dense(3, act=None)(outputs)\n outputs = tf.concat([outputs, alpha_out], -1)\n nerflet_activations = outputs\n \n ldif = RBFLayer(n_elts=n_elts, is_fine=is_fine)\n outputs = ldif(input_unembedded_pts, nerflet_activations, dists)\n\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n return model\n\n\n# Ray helpers\n\ndef get_rays(H, W, focal, c2w):\n \"\"\"Get ray origins, directions from a pinhole camera.\"\"\"\n i, j = tf.meshgrid(tf.range(W, dtype=tf.float32),\n tf.range(H, dtype=tf.float32), 
indexing='xy')\n dirs = tf.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -tf.ones_like(i)], -1)\n rays_d = tf.reduce_sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)\n rays_o = tf.broadcast_to(c2w[:3, -1], tf.shape(rays_d))\n return rays_o, rays_d\n\n\ndef get_rays_np(H, W, focal, c2w):\n \"\"\"Get ray origins, directions from a pinhole camera.\"\"\"\n i, j = np.meshgrid(np.arange(W, dtype=np.float32),\n np.arange(H, dtype=np.float32), indexing='xy')\n dirs = np.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -np.ones_like(i)], -1)\n rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)\n rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))\n return rays_o, rays_d\n\n\ndef ndc_rays(H, W, focal, near, rays_o, rays_d):\n \"\"\"Normalized device coordinate rays.\n\n Space such that the canvas is a cube with sides [-1, 1] in each axis.\n\n Args:\n H: int. Height in pixels.\n W: int. Width in pixels.\n focal: float. Focal length of pinhole camera.\n near: float or array of shape[batch_size]. Near depth bound for the scene.\n rays_o: array of shape [batch_size, 3]. Camera origin.\n rays_d: array of shape [batch_size, 3]. Ray direction.\n\n Returns:\n rays_o: array of shape [batch_size, 3]. Camera origin in NDC.\n rays_d: array of shape [batch_size, 3]. Ray direction in NDC.\n \"\"\"\n # Shift ray origins to near plane\n t = -(near + rays_o[..., 2]) / rays_d[..., 2]\n rays_o = rays_o + t[..., None] * rays_d\n\n # Projection\n o0 = -1./(W/(2.*focal)) * rays_o[..., 0] / rays_o[..., 2]\n o1 = -1./(H/(2.*focal)) * rays_o[..., 1] / rays_o[..., 2]\n o2 = 1. + 2. * near / rays_o[..., 2]\n\n d0 = -1./(W/(2.*focal)) * \\\n (rays_d[..., 0]/rays_d[..., 2] - rays_o[..., 0]/rays_o[..., 2])\n d1 = -1./(H/(2.*focal)) * \\\n (rays_d[..., 1]/rays_d[..., 2] - rays_o[..., 1]/rays_o[..., 2])\n d2 = -2. * near / rays_o[..., 2]\n\n rays_o = tf.stack([o0, o1, o2], -1)\n rays_d = tf.stack([d0, d1, d2], -1)\n\n return rays_o, rays_d\n\n\n# Hierarchical sampling helper\n\ndef sample_pdf(bins, weights, N_samples, det=False):\n\n # Get pdf\n weights += 1e-5 # prevent nans\n pdf = weights / tf.reduce_sum(weights, -1, keepdims=True)\n cdf = tf.cumsum(pdf, -1)\n cdf = tf.concat([tf.zeros_like(cdf[..., :1]), cdf], -1)\n\n # Take uniform samples\n if det:\n u = tf.linspace(0., 1., N_samples)\n u = tf.broadcast_to(u, list(cdf.shape[:-1]) + [N_samples])\n else:\n u = tf.random.uniform(list(cdf.shape[:-1]) + [N_samples])\n\n # Invert CDF\n inds = tf.searchsorted(cdf, u, side='right')\n below = tf.maximum(0, inds-1)\n above = tf.minimum(cdf.shape[-1]-1, inds)\n inds_g = tf.stack([below, above], -1)\n cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)\n bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)\n\n denom = (cdf_g[..., 1]-cdf_g[..., 0])\n denom = tf.where(denom < 1e-5, tf.ones_like(denom), denom)\n t = (u-cdf_g[..., 0])/denom\n samples = bins_g[..., 0] + t * (bins_g[..., 1]-bins_g[..., 0])\n\n return samples\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.minimum",
"tensorflow.cast",
"tensorflow.where_v2",
"tensorflow.linspace",
"tensorflow.cumsum",
"numpy.ones_like",
"tensorflow.keras.Input",
"tensorflow.Variable",
"numpy.clip",
"numpy.arange",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.matrix_diag",
"tensorflow.matmul",
"tensorflow.keras.layers.ReLU",
"tensorflow.unstack",
"tensorflow.searchsorted",
"tensorflow.shape",
"tensorflow.keras.layers.Dense",
"tensorflow.exp",
"tensorflow.keras.Model",
"tensorflow.math.sigmoid",
"tensorflow.zeros_like",
"tensorflow.keras.initializers.glorot_uniform",
"tensorflow.split",
"numpy.array",
"tensorflow.print",
"numpy.sum",
"tensorflow.nn.relu",
"tensorflow.sin",
"tensorflow.cos",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.keras.initializers.RandomUniform",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"numpy.shape",
"tensorflow.log",
"tensorflow.keras.activations.sigmoid",
"tensorflow.abs"
]
] |
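The `roll_pitch_yaw_to_rotation_matrices` helper in the kylegenova/nerflet row composes rotations as Rz(yaw)·Ry(pitch)·Rx(roll). A minimal NumPy sketch of the same element layout, with arbitrary example angles:

```python
import numpy as np

def rpy_to_matrix(roll, pitch, yaw):
    # Same stacking order as roll_pitch_yaw_to_rotation_matrices above:
    # roll about x, then pitch about y, then yaw about z.
    cx, cy, cz = np.cos([roll, pitch, yaw])
    sx, sy, sz = np.sin([roll, pitch, yaw])
    return np.array([
        [cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx],
        [sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx],
        [-sy,     cy * sx,                cy * cx],
    ])

R = rpy_to_matrix(0.1, 0.2, 0.3)
assert np.allclose(R @ R.T, np.eye(3))  # rotation matrices are orthonormal
```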
ChenHao96/tensorflow
|
[
"82b76ae80ebf5d1f320a6787371b9522e7cd5a69"
] |
[
"deepLearning/perceptron3/perceptron.py"
] |
[
"import numpy as np\n\nnp.random.seed(42)\ninput_x = np.random.rand(200, 2)\nexample_y = []\n\nfor i in range(len(input_x)):\n result = 3 * input_x[i][0] + 4 * input_x[i][1] - 3.5\n example_y.append(result)\n\nw = np.random.random(2).reshape([2, 1])\nb = np.random.random(1)\n\ndef perceptron(X, W, b):\n return np.dot(X, W) + b\n\ndef error_formula(y, y_hat):\n y = np.float_(y)\n y_hat = np.float_(y_hat)\n return -np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))\n\nlearning = 0.01\nfor _ in range(100):\n for i in range(len(input_x)):\n y_ = perceptron(input_x[i], w, b)\n if y_ > example_y[i]:\n w[0] -= input_x[i][0] * learning\n w[1] -= input_x[i][1] * learning\n b -= learning\n elif y_ < example_y[i]:\n w[0] += input_x[i][0] * learning\n w[1] += input_x[i][1] * learning\n b += learning\n\nprint(w)\nprint(b)\n"
] |
[
[
"numpy.dot",
"numpy.log",
"numpy.random.random",
"numpy.random.seed",
"numpy.random.rand",
"numpy.float_"
]
] |
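The ChenHao96/tensorflow perceptron above updates one sample at a time. A sketch of a vectorized batch variant (an assumption-laden rewrite, not numerically identical to the per-sample loop, since all updates land at once):

```python
import numpy as np

np.random.seed(42)
X = np.random.rand(200, 2)
y = 3 * X[:, 0] + 4 * X[:, 1] - 3.5       # same target function as the original

w = np.random.random((2, 1))
b = np.random.random(1)
lr = 0.01
for _ in range(100):
    y_hat = (X @ w + b).ravel()
    sign = np.sign(y_hat - y)             # +1 where we overshoot, -1 where we undershoot
    w -= lr * (X * sign[:, None]).sum(axis=0, keepdims=True).T
    b -= lr * sign.sum()

print(w, b)
```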
ivanyu/ar-sudoku-solver
|
[
"50c55fc8c3debc8868ae1f5a6d47683bc45a4159"
] |
[
"python/digit_recognizer_2/__init__.py"
] |
[
"# -*- coding: utf-8 -*-\nimport os\n\nimport numpy as np\nimport torch\nimport torchvision as tv\n\nfrom .model import DigitRecognizer2\n\n\ndef create_recognizer():\n _digit_recognizer: DigitRecognizer2 = DigitRecognizer2()\n _path = os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n \"model-ft.pth\"\n )\n _digit_recognizer.load_state_dict(torch.load(_path))\n _digit_recognizer.eval()\n\n def recognize_digits(images):\n inputs = []\n for img in images:\n input = tv.transforms.functional.to_tensor(img)\n input = tv.transforms.functional.normalize(input, (0.5,), (0.5,), inplace=True)\n inputs.append(input)\n inputs = torch.cat(inputs).unsqueeze(1)\n outputs = _digit_recognizer(inputs).data\n result = torch.max(outputs, dim=1)\n # +1 because class 0 is 1 and so on\n result = result[1] + 1\n result = list(result.numpy())\n return result, outputs\n\n return recognize_digits\n"
] |
[
[
"torch.cat",
"torch.max",
"torch.load"
]
] |
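The ivanyu/ar-sudoku-solver recognizer's preprocessing (to_tensor, normalize, cat, unsqueeze) in isolation, using a fake 28×28 grayscale crop; the image size here is an assumption for illustration, not read from the model:

```python
import numpy as np
import torch
import torchvision as tv
from PIL import Image

# A random uint8 image stands in for a real digit crop.
img = Image.fromarray(np.random.randint(0, 255, (28, 28), dtype=np.uint8))
x = tv.transforms.functional.to_tensor(img)                # [1, 28, 28], floats in [0, 1]
x = tv.transforms.functional.normalize(x, (0.5,), (0.5,))  # roughly [-1, 1]
batch = torch.cat([x, x]).unsqueeze(1)                     # [N, 1, 28, 28], as in recognize_digits
assert batch.shape == (2, 1, 28, 28)
```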
mjtadema/pytris
|
[
"1c69e5757b877915ce98cd86fc603ac4f622e516"
] |
[
"pytris/audio.py"
] |
[
"# Code based on https://simpleaudio.readthedocs.io/en/latest/tutorial.html#waveobject-s\n# Song composition based on http://www.piano-keyboard-guide.com/how-to-play-the-tetris-theme-song-easy-piano-tutorial-korobeiniki/\n\nimport numpy as np\nimport simpleaudio as sa\n\n# Song improved by Lorenzo Gaifas\nsong = [\"E\",\"B\",\"C\"] + [\"D\",\"C\",\"B\"] + [\"A\",\"A\",\"C\"] + [\"E\",\"D\",\"C\"] + [\"B\",\"B\",\"C\"] + [\"D\",\"E\",\"C\",\"A\",\"A\"] + \\\n [\"P\",\"P\"] + [\"D\",\"F\"] + \\\n [\"Ah\",\"G\",\"F\"] + [\"E\",\"E\",\"C\"] + [\"E\",\"D\",\"C\"] + [\"B\",\"B\",\"C\"] + [\"D\",\"E\",\"C\",\"A\",\"A\"] + \\\n [\"P\"]\ntiming = [2,1,1] * 5 + [2,2,2,2,2] + [2,1] + [2,1] + [2,1,1] * 4 + [2,2,2,2,2] + [2]\n\n# calculate note frequencies\nA_freq = 440\n\nfreqs = [0] + [\n A_freq * 2 ** (i / 12)\n for i in range(13)\n ]\n\nnotes = ['P','A','A#','B','C','C#','D','D#','E','F','F#','G','G#','Ah']\n\nnote_to_freq = {\n n: f\n for n, f in zip(notes, freqs)\n }\n\n# get timesteps for each sample, T is note duration in seconds\nsample_rate = 44100\nT = 0.25\nt = lambda factor: np.linspace(0, T*factor, T*factor * sample_rate, False)\n\ndef note_to_sine(note, time):\n f = note_to_freq[note]\n return np.sin(f * t(time) * 2 * np.pi) \n\nsong_sines = [\n note_to_sine(note, time)\n for note, time in zip(song, timing)\n ]\n\n# concatenate notes\naudio = np.hstack(song_sines)\n# normalize to 16-bit range\naudio *= 32767 / np.max(np.abs(audio))\n# dampen a bit\naudio *= 0.255555\n# convert to 16-bit data\naudio = audio.astype(np.int16)\n\n# start playback\ndef start():\n while True:\n play_obj = sa.play_buffer(audio, 1, 2, sample_rate)\n play_obj.wait_done()\n"
] |
[
[
"numpy.hstack",
"numpy.abs",
"numpy.linspace"
]
] |
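In the mjtadema/pytris row, `np.linspace` takes an integer sample count (hence the `int(...)` in the lambda), and `sa.play_buffer` takes raw int16 samples plus channel count, bytes per sample, and rate. Reduced to a single half-second A4 tone, a sketch of the same pattern:

```python
import numpy as np
import simpleaudio as sa

sample_rate = 44100
t = np.linspace(0, 0.5, int(0.5 * sample_rate), False)  # 0.5 s of timesteps
tone = np.sin(440 * t * 2 * np.pi)                      # A4 sine
tone = (tone * 32767 / np.max(np.abs(tone))).astype(np.int16)

play_obj = sa.play_buffer(tone, 1, 2, sample_rate)      # mono, 2 bytes per sample
play_obj.wait_done()
```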
haanme/DWIProstateMotionCorrection
|
[
"6ea9f52c54a92316ad0b2dcdec593c184e76ad28"
] |
[
"bfitASCII_IO.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n Created on Fri Apr 11 20:20:02 2014\r\n\r\n @author: merisaah\r\n \"\"\"\r\nimport os\r\nimport numpy as np\r\n\r\nclass bfitASCIIError(Exception):\r\n def __init__(self, value):\r\n self.value = value\r\n def __str__(self):\r\n return repr(self.value)\r\n\r\nclass bfitASCIIReadError(bfitASCIIError):\r\n def __str__(self):\r\n return repr('Read error ' + self.value)\r\n\r\n\r\nclass bfitASCIIWriteError(bfitASCIIError):\r\n def __str__(self):\r\n return repr('Write error ' + self.value)\r\n\r\nclass bfitASCII_IO:\r\n\r\n def Write3D(self, path, data, SI_file=True):\r\n\r\n subwindow = data['subwindow']\r\n if data.has_key('ROI_no'):\r\n ROI_No = data['ROI_No']\r\n else:\r\n ROI_No = data['number']\r\n bset = data['bset']\r\n ROIslice = data['ROIslice']\r\n name = data['name']\r\n if data.has_key('SIs'):\r\n SIs = data['SIs']\r\n else:\r\n SIs = data['data']\r\n if not SI_file:\r\n executiontime = data['executiontime']\r\n description = data['description']\r\n parameters = data['parameters']\r\n\r\n f = open(path, 'w')\r\n # write header information\r\n f.write('subwindow: [%d %d %d %d]\\n' % (subwindow[0], subwindow[1], subwindow[2], subwindow[3]))\r\n f.write('number: %d\\n' % ROI_No)\r\n f.write('bset: [')\r\n for i in range(len(bset)):\r\n f.write('%d ' % bset[i])\r\n f.write(']\\n')\r\n f.write('ROIslice: [')\r\n for i in range(len(ROIslice)):\r\n f.write('%d ' % ROIslice[i])\r\n f.write(']\\n')\r\n f.write('name: %s\\n' % name)\r\n if not SI_file:\r\n f.write('executiontime: %d seconds\\n' % executiontime)\r\n f.write('description: %s\\n' % description)\r\n f.write('parameters: %s\\n' % parameters)\r\n data_length = len(SIs)\r\n print_step = data_length/10\r\n bset_length = SIs.shape[1]\r\n for i in range(len(SIs)):\r\n for j in range(bset_length):\r\n f.write('%.15f ' % SIs[i][j])\r\n f.write('\\n')\r\n if(np.mod(i,print_step) == 0):\r\n print ('writing %d/%d' % (i+1, data_length))\r\n print ('writing %d/%d' % (data_length, data_length))\r\n else:\r\n f.write('SIs: \\n')\r\n # write SI data\r\n data_length = len(SIs)\r\n print_step = data_length/10\r\n bset_length = len(bset)\r\n for i in range(len(SIs)):\r\n for j in range(bset_length):\r\n f.write('%.15f ' % SIs[i][j])\r\n f.write('\\n')\r\n if(np.mod(i,print_step) == 0):\r\n print ('writing %d/%d' % (i+1, data_length))\r\n print ('writing %d/%d' % (data_length, data_length))\r\n f.close()\r\n\r\n def Read(self, path, SI_file):\r\n\r\n f = open(path)\r\n lines = f.readlines()\r\n f.close()\r\n outdata = {'data':[]}\r\n for line_i in range(len(lines)):\r\n line = lines[line_i]\r\n # resolve subwindow\r\n if line.find('subwindow') == 0:\r\n subs = line.split()\r\n subs = subs[1:]\r\n subs[0] = subs[0].lstrip('[')\r\n subs[-1] = subs[-1].rstrip(']')\r\n if len(subs[-1]) == 0:\r\n subs = subs[:-1]\r\n outdata['subwindow'] = [int(float(subs[0])), int(float(subs[1])), int(float(subs[2])), int(float(subs[3]))]\r\n continue\r\n # resolve bset\r\n if line.find('bset') == 0:\r\n subs = line.split()\r\n subs = subs[1:]\r\n subs[0] = subs[0].lstrip('[')\r\n subs[-1] = subs[-1].rstrip(']')\r\n if len(subs[-1]) == 0:\r\n subs = subs[:-1]\r\n bset = [int(float(s)) for s in subs]\r\n outdata['bset'] = bset\r\n continue\r\n # resolve parameters\r\n if line.find('parameters') == 0:\r\n subs = line.split()\r\n subs = subs[1:]\r\n outdata['parameters'] = subs\r\n continue\r\n # resolve ROi slice numbers\r\n if line.find('ROIslice') == 0:\r\n subs = line.split()\r\n subs = subs[1:]\r\n subs[0] = 
subs[0].lstrip('[')\r\n subs[-1] = subs[-1].rstrip(']')\r\n if len(subs[-1]) == 0:\r\n subs = subs[:-1]\r\n ROIslice = [int(float(s)) for s in subs]\r\n outdata['ROIslice'] = ROIslice\r\n continue\r\n # resolve description\r\n if line.find('description') == 0:\r\n subs = line.split(':')\r\n subs = subs[1:]\r\n outdata['description'] = ':'.join(subs).strip()\r\n continue\r\n # resolve name\r\n if line.find('name') == 0:\r\n subs = line.split(':')\r\n subs = subs[1:]\r\n outdata['name'] = subs[0].strip()\r\n continue\r\n # resolve name\r\n if line.find('executiontime') == 0:\r\n subs = line.split(':')\r\n subs = subs[1:]\r\n outdata['executiontime'] = subs[0].strip()\r\n continue\r\n # resolve number\r\n if line.find('number') == 0:\r\n subs = line.split(':')\r\n subs = subs[1:]\r\n outdata['number'] = int(float(subs[0]))\r\n continue\r\n if (SI_file and line_i < 6) or (not SI_file and line_i < 8):\r\n #print \"continue \" + line\r\n continue\r\n #print \"number str \" + line\r\n # resolve parameter values\r\n outdata['data'].append([float(s) for s in line.split()])\r\n outdata['data'] = np.array(outdata['data'])\r\n return outdata\r\n"
] |
[
[
"numpy.mod",
"numpy.array"
]
] |
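The `Read` method of `bfitASCII_IO` repeats one bracket-stripping idiom per header field; isolated with a made-up header line, it looks like this:

```python
line = 'subwindow: [221 330 189 298]'  # hypothetical header values

subs = line.split()[1:]        # drop the 'subwindow:' tag
subs[0] = subs[0].lstrip('[')
subs[-1] = subs[-1].rstrip(']')
if len(subs[-1]) == 0:         # handles a lone trailing ']' token
    subs = subs[:-1]
subwindow = [int(float(s)) for s in subs]
assert subwindow == [221, 330, 189, 298]
```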
oisc/ChineseDiscourseParser
|
[
"1363716cc5caa9e9a82987f2ce76fc8b07b4cdfa"
] |
[
"segmenter/svm/model.py"
] |
[
"# coding: UTF-8\n\nimport logging\nfrom collections import OrderedDict\nfrom nltk import ParentedTree\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.svm import LinearSVC\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SVMCommaClassifier:\n def __init__(self, connectives, candidate=\",,;\", seed=21):\n self.connectives = connectives\n self.candidate = candidate\n self.fet_vector = DictVectorizer()\n self.clf = LinearSVC(random_state=seed)\n\n def predict(self, comma_pos, parse):\n fet = self.extract_features(comma_pos, parse)\n x = self.fet_vector.transform([fet])\n return self.clf.predict(x)[0]\n\n def predict_many(self, x):\n fets = []\n for comma_pos, parse in x:\n fets.append(self.extract_features(comma_pos, parse))\n x = self.fet_vector.transform(fets)\n return self.clf.predict(x)\n\n def extract_features(self, comma_pos, parse):\n childs = list(parse.subtrees(lambda t: t.height() == 2 and t.label() != '-NONE-'))\n offset = 0\n comma = None\n comma_index = -1\n for i, child in enumerate(childs):\n if offset == comma_pos:\n comma = child\n comma_index = i\n offset += len(child[0])\n\n if comma is None:\n return {}\n\n comma_prev = []\n comma_post = []\n if comma_index > 0:\n for child in childs[comma_index-1::-1]:\n if child[0] == ',' or child[0] == ',':\n break\n else:\n comma_prev.append(child)\n comma_prev = comma_prev[::-1]\n for child in childs[comma_index+1:]:\n if child[0] == ',' or child[0] == ',':\n break\n else:\n comma_post.append(child)\n\n # extract feature\n fet = OrderedDict()\n for i, prev in enumerate(comma_prev[:3]):\n fet['F1_P_%d' % (i+1)] = prev.label()\n fet['F1_W_%d' % (i+1)] = prev[0]\n for i, prev in enumerate(comma_prev[-3:]):\n fet['F2_P_%d' % (i+1)] = prev.label()\n fet['F2_W_%d' % (i+1)] = prev[0]\n\n if comma_post:\n fet['F3'] = comma_post[0].label()\n fet['F4'] = comma_post[0][0]\n\n for node in comma_prev:\n if node[0] in self.connectives:\n fet['F5_1'] = node[0]\n for node in comma_post:\n if node[0] in self.connectives:\n fet['F5_2'] = node[0]\n\n lsibling = comma.left_sibling()\n rsibling = comma.right_sibling()\n while isinstance(lsibling, ParentedTree) and lsibling.label() == '-NONE-':\n lsibling = lsibling.left_sibling()\n while isinstance(rsibling, ParentedTree) and rsibling.label() == '-NONE-':\n rsibling = rsibling.right_sibling()\n\n if lsibling:\n fet['F6'] = lsibling.label()\n if rsibling:\n fet['F7'] = rsibling.label()\n if lsibling and rsibling:\n fet['F8'] = '%s_%s' % (fet['F6'], fet['F7'])\n fet['F9'] = '%s_%s_%s' % (fet['F6'], comma.parent().label(), fet['F7'])\n\n for node in comma_prev:\n if node.label().startswith('VC'):\n fet['F10_1'] = 'True'\n if node.label().startswith('VA'):\n fet['F10_2'] = 'True'\n if node.label().startswith('VE'):\n fet['F10_3'] = 'True'\n if node.label().startswith('VV'):\n fet['F10_4'] = 'True'\n if node.label().startswith('CS'):\n fet['F10_5'] = 'True'\n for node in comma_post:\n if node.label().startswith('VC'):\n fet['F11_1'] = 'True'\n if node.label().startswith('VA'):\n fet['F11_2'] = 'True'\n if node.label().startswith('VE'):\n fet['F11_3'] = 'True'\n if node.label().startswith('VV'):\n fet['F11_4'] = 'True'\n if node.label().startswith('CS'):\n fet['F11_5'] = 'True'\n\n pcomma = comma.parent()\n if 'F9' in fet and fet['F9'] == 'IP_IP_IP':\n fet['F12'] = 'True'\n if parse.height() - pcomma.height() == 1:\n fet['F13'] = 'True'\n if 'F12' in fet and fet['F12'] and 'F13' in fet and fet['F13']:\n fet['F14'] = 'True'\n\n punct = []\n for child in childs:\n if child[0] in 
',.?!,。?!':\n punct.append(child[0])\n fet['F15'] = '_'.join(punct)\n\n pre_len = len(''.join([node[0] for node in comma_prev]))\n post_len = len(''.join(node[0] for node in comma_post))\n if pre_len < 5:\n fet['F16'] = 'True'\n if abs(pre_len - post_len) > 7:\n fet['F17'] = 'True'\n\n comma_dept = 0\n tmp_node = comma\n while tmp_node.parent() and tmp_node.parent() is not parse:\n comma_dept += 1\n tmp_node = tmp_node.parent()\n fet['F18'] = comma_dept\n del tmp_node\n\n if pcomma and pcomma.label().startswith('NP'):\n fet['F19'] = 'True'\n if isinstance(lsibling, ParentedTree) and lsibling.label().startswith('NP'):\n fet['F20'] = 'True'\n if isinstance(rsibling, ParentedTree) and rsibling.label().startswith('NP'):\n fet['F21'] = 'True'\n\n if len(comma_prev) >= 2:\n fet['F22'] = comma_prev[0].label() + '_' + comma_prev[-1].label()\n fet['F23'] = comma_prev[0][0] + '_' + comma_prev[-1][0]\n\n comma_prev_set = set([(node.label(), node[0]) for node in comma_prev if node.label() != 'PU'])\n comma_post_set = set([(node.label(), node[0]) for node in comma_post if node.label() != 'PU'])\n if comma_prev_set & comma_post_set:\n fet['F24'] = list(comma_prev_set & comma_post_set)[0][0]\n return fet\n"
] |
[
[
"sklearn.feature_extraction.DictVectorizer",
"sklearn.svm.LinearSVC"
]
] |
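`SVMCommaClassifier` wires `DictVectorizer` and `LinearSVC` together in the standard scikit-learn way: fit the vectorizer on feature dicts, then train and predict on the transformed matrix. A toy round trip with invented feature dicts (the F* keys are placeholders, not real extracted features):

```python
from sklearn.feature_extraction import DictVectorizer
from sklearn.svm import LinearSVC

X_dicts = [{'F3': 'VP', 'F18': 2}, {'F3': 'NP', 'F18': 0},
           {'F3': 'VP', 'F18': 3}, {'F3': 'NP', 'F18': 1}]
y = [1, 0, 1, 0]

vec = DictVectorizer()                     # one-hot encodes string values, passes numbers through
clf = LinearSVC(random_state=21)
clf.fit(vec.fit_transform(X_dicts), y)
print(clf.predict(vec.transform([{'F3': 'VP', 'F18': 2}])))  # expect [1] on this toy data
```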
itsdaveba/rubik-solver
|
[
"eebae6cffc9f91e64d5f3e49d556a78df0e703f5"
] |
[
"tests/test_cube.py"
] |
[
"from .context import rubik_solver\r\nfrom .context import available_moves\r\n\r\nimport numpy as np\r\nimport unittest\r\n\r\n\r\nbenchmark_scrambles = [\r\n(\"F' D U' L2 B2 D2 B2 D B F U L2 R2 B2 U' L F' D U2 F' D2 U L U' B' U' B2 U R2 U\",\r\n\tnp.array([ 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]),\r\n\tnp.array([ 7, 1, 4, 3, 2, 6, 5, 0,19, 8,15,12,17,16,18,11,14,13, 9,10])),\r\n(\"U B2 D F2 D R B' R' D B D2 F R D' F R D2 L' B' D U' R B2 D B2 R U' F2 D' R2\",\r\n\tnp.array([ 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0]),\r\n\tnp.array([ 0, 6, 7, 1, 2, 4, 5, 3, 18,13,15,10,17,14,19,11,16, 9, 8,12])),\r\n(\"D' F2 L2 B2 L B D2 F L2 D2 U2 B2 F2 L R' D U2 L2 U L' U L F' R' F U' B2 F2 L' R'\",\r\n\tnp.array([ 2, 1, 2, 1, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0]),\r\n\tnp.array([ 7, 4, 5, 1, 3, 2, 0, 6,16, 9,19,18, 8,10,17,15,13,11,12,14])),\r\n(\"D2 L' D R F R' D' U' R' U2 R' D F2 D U L' F' D' L2 U' B' F' D2 B' L2 U2 B D' U2 L'\",\r\n\tnp.array([ 0, 1, 1, 0, 2, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0]),\r\n\tnp.array([ 2, 6, 4, 0, 3, 7, 5, 1,18,15,19,10, 9,12,13, 8,11,17,16,14])),\r\n(\"U L2 B F L2 D' U' B D' B2 F' L2 R' F' L R2 F R' U B L2 F' U2 L D U' L2 R' B2 L\",\r\n\tnp.array([ 0, 1, 1, 0, 2, 0, 2, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]),\r\n\tnp.array([ 7, 0, 6, 5, 4, 1, 2, 3,18,19, 8,15,11,14,12,13,10,16,17, 9]))]\r\n\r\nclass TestCube(unittest.TestCase):\r\n\tdef test_cube(self):\r\n\t\tcube = rubik_solver.Cube()\r\n\t\tself.assertTrue(cube.is_solved())\r\n\t\tself.assertEqual(cube.scramble, None)\r\n\t\tfor scramble, orientation, permutation in benchmark_scrambles:\r\n\t\t\tcube = rubik_solver.Cube(scramble)\r\n\t\t\tself.assertFalse(cube.is_solved())\r\n\t\t\tself.assertTrue(np.array_equal(cube.position['orientation'], orientation))\r\n\t\t\tself.assertTrue(np.array_equal(cube.position['permutation'], permutation))\r\n\t\t\tcube.reset()\r\n\t\t\tcube.position = cube.apply_scramble(scramble)\r\n\t\t\tself.assertFalse(cube.is_solved())\r\n\t\t\tself.assertTrue(np.array_equal(cube.position['orientation'], orientation))\r\n\t\t\tself.assertTrue(np.array_equal(cube.position['permutation'], permutation))\r\n\t\tfor i in range(100):\r\n\t\t\tcube = rubik_solver.Cube(i)\r\n\t\t\tif i == 0:\r\n\t\t\t\tself.assertTrue(cube.is_solved())\r\n\t\t\telse:\r\n\t\t\t\tself.assertFalse(cube.is_solved())\r\n\t\t\tself.assertEqual(len(cube.scramble.split()), i)\r\n\t\tself.assertTrue(cube.is_solved(rubik_solver.Cube().position))\r\n\t\tfor move in available_moves[None]:\r\n\t\t\ttmp1 = cube.make_move(move)\r\n\t\t\ttmp2 = cube.make_move(move, rubik_solver.Cube().position)\r\n\t\t\tself.assertFalse(np.array_equal(tmp1['orientation'], tmp2['orientation']))\r\n\t\t\tself.assertFalse(np.array_equal(tmp1['permutation'], tmp2['permutation']))\r\n\r\nif __name__ == \"__main__\":\r\n\tunittest.main()"
] |
[
[
"numpy.array",
"numpy.array_equal"
]
] |
LogIntelligence/LogADEmpirical
|
[
"48458aee65c1c84466b04dd4092fae79a7f341fd"
] |
[
"logadempirical/neural_log/attention.py"
] |
[
"from torch import Tensor\nimport torch.nn.functional as F\nimport torch\nfrom torch import nn\n\n\ndef scaled_dot_product_attention(query: Tensor, key: Tensor, value: Tensor) -> Tensor:\n temp = query.bmm(key.transpose(1, 2))\n scale = query.size(-1) ** 0.5\n softmax = F.softmax(temp / scale, dim=-1)\n return softmax.bmm(value)\n\n\nclass AttentionHead(nn.Module):\n def __init__(self, dim_in: int, dim_k: int, dim_v: int):\n super().__init__()\n self.q = nn.Linear(dim_in, dim_k)\n self.k = nn.Linear(dim_in, dim_k)\n self.v = nn.Linear(dim_in, dim_v)\n\n def forward(self, query: Tensor, key: Tensor, value: Tensor) -> Tensor:\n return scaled_dot_product_attention(self.q(query), self.k(key), self.v(value))\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, num_heads: int, dim_in: int, dim_k: int, dim_v: int):\n super().__init__()\n self.heads = nn.ModuleList(\n [AttentionHead(dim_in, dim_k, dim_v) for _ in range(num_heads)]\n )\n self.linear = nn.Linear(num_heads * dim_v, dim_in)\n\n def forward(self, query: Tensor, key: Tensor, value: Tensor) -> Tensor:\n return self.linear(\n torch.cat([h(query, key, value) for h in self.heads], dim=-1)\n )\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.functional.softmax"
]
] |
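`scaled_dot_product_attention` above is the textbook softmax(QKᵀ/√d)·V; inlined with random tensors to make the shapes concrete (sizes are arbitrary examples):

```python
import torch
import torch.nn.functional as F

q = torch.randn(2, 7, 8)    # [batch, seq, dim_k]
k = torch.randn(2, 7, 8)
v = torch.randn(2, 7, 16)   # dim_v may differ from dim_k

scores = q.bmm(k.transpose(1, 2)) / q.size(-1) ** 0.5
out = F.softmax(scores, dim=-1).bmm(v)
assert out.shape == (2, 7, 16)
```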
pachecotiago/autobasedoc
|
[
"14e70a454559435d8a8b8b4fde475c1ba6f94db7"
] |
[
"autobasedoc/autoplot.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 16 11:11:12 2015\r\n\r\n@author: johannes\r\n\"\"\"\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals\r\n\r\nimport os\r\nimport sys\r\n\r\nfrom io import BytesIO\r\nfrom functools import wraps\r\nfrom cycler import cycler\r\n\r\nimport matplotlib\r\n\r\ntry:\r\n matplotlib.use('Agg', force=True)\r\nexcept:\r\n print(\"check your matplotlib aggregator settings\")\r\n print(\"matplotlib version:\", matplotlib.__version__)\r\n\r\nfrom matplotlib.lines import Line2D\r\nfrom matplotlib.patches import Rectangle\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.transforms import Bbox\r\nfrom matplotlib.ticker import LinearLocator, MultipleLocator, AutoMinorLocator, FormatStrFormatter\r\nimport matplotlib.font_manager as fm\r\nfrom matplotlib.font_manager import findfont\r\nfrom matplotlib import ft2font\r\nfrom matplotlib.font_manager import createFontList, ttfFontProperty\r\n\r\nfrom autobasedoc.pdfimage import PdfImage, PdfAsset, getScaledSvg\r\n\r\n# add color names, missing in matplotlib\r\nmissing_names = {\r\n 'darkyellow': '#CC9900',\r\n 'lightmagenta': '#EDB2ED',\r\n 'lightred': '#FF8787'\r\n}\r\n\r\nmatplotlib.colors.cnames.update(missing_names)\r\n\r\nplt.ioff()\r\n\r\nfontprop = None\r\n\r\n\r\ndef autoPdfImage(func):\r\n \"\"\"\r\n decorator for the autoplot module\r\n\r\n returns two PdfImage objects if wrapped plt-function obeys the principle\r\n demonstated in following minimal example::\r\n\r\n @autoPdfImage\r\n def my_plot(canvaswidth=5): #[inch]\r\n fig, ax = ap.plt.subplots(figsize=(canvaswidth,canvaswidth))\r\n fig.suptitle(\"My Plot\",fontproperties=fontprop)\r\n x=[1,2,3,4,5,6,7,8]\r\n y=[1,6,8,3,9,3,4,2]\r\n ax.plot(x,y,label=\"legendlabel\")\r\n nrow,ncol=1,1\r\n handles, labels = ax.get_legend_handles_labels()\r\n\r\n leg_fig = ap.plt.figure(figsize=(canvaswidth, 0.2*nrow))\r\n\r\n leg = leg_fig.legend(handles, labels, #labels = tuple(bar_names)\r\n ncol=ncol, mode=None,\r\n borderaxespad=0.,\r\n loc='center', # the location of the legend handles\r\n handleheight=None, # the height of the legend handles\r\n #fontsize=9, # prop beats fontsize\r\n markerscale=None,\r\n frameon=False,\r\n prop=fontprop\r\n #fancybox=True,\r\n )\r\n\r\n return fig,leg_fig,leg\r\n\r\n TODO: add example in tests\r\n \"\"\"\r\n\r\n @wraps(func)\r\n def funcwrapper(*args, **kwargs):\r\n \"\"\"\r\n minimal example::\r\n\r\n def my_decorator(f):\r\n @wraps(f)\r\n def wrapper(*args, **kwds):\r\n print('Calling decorated function')\r\n return f(*args, **kwds)\r\n return wrapper\r\n \"\"\"\r\n imgax = BytesIO()\r\n imgleg = BytesIO()\r\n\r\n fig, leg_fig, leg = func(*args, **kwargs)\r\n\r\n if not fig:\r\n return\r\n\r\n leg_fig.savefig(\r\n imgleg,\r\n #additional_artists=(leg.get_window_extent(), ),\r\n bbox_extra_artists=(leg.legendPatch, ),\r\n bbox_inches='tight',\r\n format='PDF',\r\n transparent=True)\r\n # rewind the data\r\n imgleg.seek(0)\r\n\r\n plt.clf()\r\n plt.close('all')\r\n fig.savefig(imgax, format='PDF')\r\n return PdfImage(imgax), PdfImage(imgleg)\r\n\r\n return funcwrapper\r\n\r\n\r\ndef autoPdfImg(func):\r\n \"\"\"\r\n decorator for the autoplot module\r\n\r\n returns one PdfImage objects if wrapped plt-function obeys the principle\r\n demonstated in following minimal example::\r\n\r\n @autoPdfImg\r\n def my_plot(canvaswidth=5): #[inch]\r\n fig, ax = ap.plt.subplots(figsize=(canvaswidth,canvaswidth))\r\n fig.suptitle(\"My Plot\",fontproperties=fontprop)\r\n x=[1,2,3,4,5,6,7,8]\r\n 
y=[1,6,8,3,9,3,4,2]\r\n ax.plot(x,y,label=\"legendlabel\")\r\n nrow,ncol=1,1\r\n handles, labels = ax.get_legend_handles_labels()\r\n\r\n leg_fig = ap.plt.figure(figsize=(canvaswidth, 0.2*nrow))\r\n\r\n ax.legend(handles, labels, #labels = tuple(bar_names)\r\n ncol=ncol, mode=None,\r\n borderaxespad=0.,\r\n loc='center', # the location of the legend handles\r\n handleheight=None, # the height of the legend handles\r\n #fontsize=9, # prop beats fontsize\r\n markerscale=None,\r\n frameon=False,\r\n prop=fontprop\r\n #fancybox=True,\r\n )\r\n\r\n return fig\r\n\r\n \"\"\"\r\n\r\n @wraps(func)\r\n def funcwrapper(*args, **kwargs):\r\n \"\"\"\r\n minimal example::\r\n\r\n def my_decorator(f):\r\n @wraps(f)\r\n def wrapper(*args, **kwds):\r\n print('Calling decorated function')\r\n return f(*args, **kwds)\r\n return wrapper\r\n \"\"\"\r\n imgax = BytesIO()\r\n\r\n fig = func(*args, **kwargs)\r\n\r\n if not fig:\r\n return\r\n\r\n plt.clf()\r\n\r\n if 'close' in kwargs:\r\n if kwargs['close']:\r\n plt.close('all')\r\n fig.savefig(imgax, format='PDF')\r\n\r\n return PdfImage(imgax)\r\n\r\n return funcwrapper\r\n\r\n\r\ndef full_extent(ax, pad=0.0):\r\n \"\"\"\r\n Get the full extent of an axes, including axes labels, tick labels, and\r\n titles.\r\n \"\"\"\r\n # For text objects, we need to draw the figure first, otherwise the extents\r\n # are undefined.\r\n ax.figure.canvas.draw()\r\n try:\r\n items = ax.get_xticklabels() + ax.get_yticklabels()\r\n except AttributeError:\r\n return ax.get_window_extent()\r\n # items += [ax, ax.title, ax.xaxis.label, ax.yaxis.label]\r\n items += [ax, ax.title]\r\n bbox = Bbox.union([item.get_window_extent() for item in items])\r\n return bbox.expanded(1.0 + pad, 1.0 + pad)\r\n"
] |
[
[
"matplotlib.use",
"matplotlib.colors.cnames.update",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close"
]
] |
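Both autobasedoc decorators reduce to the same figure-to-BytesIO round trip; stripped of the decorator machinery it is just the following (plot data is arbitrary):

```python
from io import BytesIO
import matplotlib
matplotlib.use('Agg')            # headless backend, as in the module above
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(5, 5))
ax.plot([1, 2, 3, 4], [1, 6, 8, 3])

buf = BytesIO()
fig.savefig(buf, format='PDF')
plt.close(fig)
buf.seek(0)                      # rewind before handing the buffer to a PDF consumer
print(len(buf.getvalue()), 'bytes of PDF')
```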
abelatnvidia/IntroTF
|
[
"35216d1c149e2cfbf4a3374a2871d8119e585870"
] |
[
"src/module_06/code_03.py"
] |
[
"import os, glob, tensorflow as tf\n\n# create a file pattern to pick up csv files in pwd\nfile_pattern = os.path.join(os.getcwd(),'*.csv')\n\n# get list of csv files in the current directory\ncsv_files = glob.glob(file_pattern)\n\n# create a queue of files\nfile_queue = tf.train.string_input_producer(csv_files)\n\n# init Reader that outputs the lines of a file delimited by newlines\nreader = tf.TextLineReader(skip_header_lines=1)\n\n# read k,v pairs from the file queue contents\nkey, get_record_op = reader.read(file_queue)\n\n# A list of Tensor objects with types from: float32, int32, int64, string.\n# One tensor per column of the input record, with either a scalar default\n# value for that column or empty if the column is required.\ncolumn_descriptors = [[0.0],[0.0],[0.0],[0.0],['Absent'],[0.0],[0.0],[0.0],[0.0],[0]]\n\n# parse a line of data using the descriptors for each element\ndecode_record_ops = tf.decode_csv(get_record_op, record_defaults=column_descriptors,field_delim=',')\n\n# create a predicate (i.e. boolean test)\npredicate = tf.equal(decode_record_ops[4], tf.constant('Present'))\n\n# convert the 5th value based on present/absent to 1.0/0.0\ndecode_record_ops[4] = tf.cond(predicate, lambda: tf.constant(1.0), lambda: tf.constant(0.0))\n\n# init session ...\nwith tf.Session() as sess:\n\n # open summary file writer for tensorboard\n sfw = tf.summary.FileWriter(os.getcwd(),sess.graph)\n\n # Start populating the filename queue.\n queue_coordinator = tf.train.Coordinator()\n queue_worker_threads = tf.train.start_queue_runners(coord=queue_coordinator)\n\n # get a single row of data\n record = sess.run(decode_record_ops)\n\n # close down the queue\n queue_coordinator.request_stop()\n queue_coordinator.join(queue_worker_threads)\n\n # clean up\n sfw.close()\n\n# ok, print the line of data from the csv file\nprint('got a line of data: {}'.format(record))\n\n'''\n See here that we actually get back a tensor/collection of ops\n when for the decode process (one for each column of data in CSV).\n In the decode process, sess.run(decode_ops) actually calls each\n operation one-by-one and then consolodates the op results back\n into a tensor with the appropriate order. \n \n So the line of string data (i.e. the record) produced by the \n TextLineReader is choped up by the decoder into parts using \n the ',' delimiter. Each of these parts is given to its repective\n decoder operation. If we used a different delimiter say \";\" \n instead of \",\", then we could for example have a field that \n contained values of a vector, or matrix: \n \n 1,2,3,4; 5,6,7,8; 9,10,11,12 \n \n could be decoded into 3 elements with dimension 1x4 or 2x2\n \n The interesting thing is that you can take the decode operation\n and add additional operations such as convert strings to values etc\n That is, the decoder provides a set of default operations which\n you can use just like any other operation. This allows for simple\n creation of data pre-processing compute graphs\n'''"
] |
[
[
"tensorflow.constant",
"tensorflow.TextLineReader",
"tensorflow.decode_csv",
"tensorflow.train.start_queue_runners",
"tensorflow.train.Coordinator",
"tensorflow.train.string_input_producer",
"tensorflow.Session"
]
] |
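The queue/`TextLineReader` pipeline above is TF1-only. A rough TF2 sketch of the same column-default decoding via `tf.data`, under the assumption of a hypothetical `data.csv` whose columns match the ten descriptors:

```python
import tensorflow as tf

defaults = [[0.0], [0.0], [0.0], [0.0], ['Absent'], [0.0], [0.0], [0.0], [0.0], [0]]
ds = tf.data.experimental.CsvDataset('data.csv', record_defaults=defaults, header=True)

for record in ds.take(1):
    fields = list(record)
    # same Present/Absent -> 1.0/0.0 conversion as above, eagerly
    fields[4] = tf.where(tf.equal(fields[4], 'Present'), 1.0, 0.0)
    print([f.numpy() for f in fields])
```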
andurilhuang/Movie_Income_Prediction
|
[
"5c705fe60295a175e9dc54191df422fc49ceccd6"
] |
[
"paper/historycode/Code/info_data.py"
] |
[
"def info_data(): \n import os\n import requests\n import json\n import pandas as pd\n from datetime import datetime\n OMDB_KEY = \"4c427520\"\n\n\n #get imdb_id_list from csv\n profit_df = pd.read_csv('revenue.csv')\n # print profit_df.head()\n imdb_id_list = profit_df['imdb_id']\n #loop through imdb_id_list to get all the rest of the variables\n dict_list = []\n for imdb_id in imdb_id_list:\n query = ' http://www.omdbapi.com/?i=%s&apikey=%s'\n r = requests.head(query % (imdb_id,OMDB_KEY))\n #print r.status_code\n if r.status_code == 200:\n try:\n rq = requests.get(query % (imdb_id,OMDB_KEY)).json()\n dict_list.append({'imdbID':imdb_id,\n 'Title': rq['Title'],\n 'Country':rq['Country'],\n 'Language':rq['Language'],\n 'Released':rq['Released'],\n 'Year' : rq['Year'],\n 'Rated':rq['Rated'],\n 'Genre':rq['Genre'],\n 'Actors':rq['Actors'],\n 'Director':rq['Director'],\n 'Runtime':rq['Runtime'],\n 'IMDB Rating': rq['imdbRating'],\n 'IMDB Votes': rq['imdbVotes'],\n 'Production':rq['Production']\n })\n except KeyError as reason:\n print(reason)\n print(\"Finished:\"+imdb_id)\n info_df = pd.DataFrame(dict_list)\n info_df.to_csv(\"Info_data.csv\")\n\n\n #combine_df = profit_df.join(info_df,lsuffix='imdb_id', rsuffix='imdbID')\n #combine_df.to_csv('CombinedData_2.csv')\nif __name__ == \"__main__\":\n info_data()\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
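The OMDb loop above ultimately just accumulates dicts into a DataFrame. The pandas half in isolation, with invented rows and the network calls omitted:

```python
import pandas as pd

# Toy records standing in for parsed OMDb responses.
dict_list = [
    {'imdbID': 'tt0000001', 'Title': 'Example A', 'Year': '1994'},
    {'imdbID': 'tt0000002', 'Title': 'Example B', 'Year': '1972'},
]
info_df = pd.DataFrame(dict_list)
info_df.to_csv('Info_data.csv', index=False)  # index=False keeps the CSV free of a row-number column
```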
scarecrow1123/pytorch-lightning
|
[
"b39f4798a6859d2237b48b29b39a2390164612c1"
] |
[
"tests/utilities/test_dtype_device_mixin.py"
] |
[
"import pytest\nimport torch\nimport torch.nn as nn\n\nfrom pytorch_lightning import Trainer, Callback\nfrom pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin\nfrom tests.base import EvalModelTemplate\n\n\nclass SubSubModule(DeviceDtypeModuleMixin):\n pass\n\n\nclass SubModule(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.module = SubSubModule()\n\n\nclass TopModule(EvalModelTemplate):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.module = SubModule()\n\n\nclass DeviceAssertCallback(Callback):\n\n def on_train_batch_start(self, trainer, model):\n rank = trainer.local_rank\n assert isinstance(model, TopModule)\n # index = None also means first device\n assert (model.device.index is None and rank == 0) or model.device.index == rank\n assert model.device == model.module.module.device\n\n\[email protected](['dst_dtype'], [\n pytest.param(torch.float),\n pytest.param(torch.double),\n pytest.param(torch.half),\n])\[email protected](['dst_device'], [\n pytest.param(torch.device('cpu')),\n pytest.param(torch.device('cuda')),\n pytest.param(torch.device('cuda', 0)),\n])\[email protected](not torch.cuda.is_available(), reason=\"test requires GPU machine\")\ndef test_submodules_device_and_dtype(dst_device, dst_dtype):\n \"\"\"\n Test that the device and dtype property updates propagate through mixed nesting of regular\n nn.Modules and the special modules of type DeviceDtypeModuleMixin (e.g. Metric or LightningModule).\n \"\"\"\n\n model = TopModule()\n assert model.device == torch.device('cpu')\n model = model.to(device=dst_device, dtype=dst_dtype)\n # nn.Module does not have these attributes\n assert not hasattr(model.module, '_device')\n assert not hasattr(model.module, '_dtype')\n # device and dtype change should propagate down into all children\n assert model.device == model.module.module.device == dst_device\n assert model.dtype == model.module.module.dtype == dst_dtype\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_submodules_multi_gpu_dp(tmpdir):\n model = TopModule()\n trainer = Trainer(\n default_root_dir=tmpdir,\n distributed_backend='dp',\n gpus=2,\n callbacks=[DeviceAssertCallback()],\n max_steps=1,\n )\n trainer.fit(model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_submodules_multi_gpu_ddp_spawn(tmpdir):\n model = TopModule()\n trainer = Trainer(\n default_root_dir=tmpdir,\n distributed_backend='dpp_spawn',\n gpus=2,\n callbacks=[DeviceAssertCallback()],\n max_steps=1,\n )\n trainer.fit(model)\n"
] |
[
[
"torch.device",
"torch.cuda.device_count",
"torch.cuda.is_available"
]
] |
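The property under test above, device/dtype propagation through `.to()`, can be checked without Lightning at all; a CPU-only sketch with invented module names:

```python
import torch
import torch.nn as nn

class Leaf(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(2, 2)

model = nn.Sequential(Leaf(), Leaf()).to(dtype=torch.double)
# .to() recurses into all children, so every parameter follows the cast
assert all(p.dtype == torch.double for p in model.parameters())
```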
jbutle55/Mask_RCNN
|
[
"0a584309fa1198216a8d95787c93dace73da35e1"
] |
[
"samples/shapes/shapes.py"
] |
[
"\"\"\"\nMask R-CNN\nConfigurations and data loading code for the synthetic Shapes dataset.\nThis is a duplicate of the code in the noteobook train_shapes.ipynb for easy\nimport into other notebooks, such as inspect_model.ipynb.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport math\nimport random\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\nprint(ROOT_DIR)\n\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model import log\n\n\n\nclass ShapesConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"shapes\"\n\n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 3 # background + 3 shapes\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 512\n IMAGE_MAX_DIM = 512\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels\n\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 32\n\n # Use a small epoch since the data is simple\n STEPS_PER_EPOCH = 100\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 5\n\n USE_MINI_MASK = False\n\n\nclass ShapesDataset(utils.Dataset):\n \"\"\"Generates the shapes synthetic dataset. The dataset consists of simple\n shapes (triangles, squares, circles) placed randomly on a blank surface.\n The images are generated on the fly. No file access required.\n \"\"\"\n\n def load_shapes(self, count, height, width):\n \"\"\"Generate the requested number of synthetic images.\n count: number of images to generate.\n height, width: the size of the generated images.\n \"\"\"\n # Add classes\n self.add_class(\"shapes\", 1, \"square\")\n self.add_class(\"shapes\", 2, \"circle\")\n self.add_class(\"shapes\", 3, \"triangle\")\n\n # Add images\n # Generate random specifications of images (i.e. color and\n # list of shapes sizes and locations). This is more compact than\n # actual images. 
Images are generated on the fly in load_image().\n for i in range(count):\n bg_color, shapes = self.random_image(height, width)\n self.add_image(\"shapes\", image_id=i, path=None,\n width=width, height=height,\n bg_color=bg_color, shapes=shapes)\n\n def load_image(self, image_id):\n \"\"\"Generate an image from the specs of the given image ID.\n Typically this function loads the image from a file, but\n in this case it generates the image on the fly from the\n specs in image_info.\n \"\"\"\n info = self.image_info[image_id]\n bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n image = image * bg_color.astype(np.uint8)\n for shape, color, dims in info['shapes']:\n image = self.draw_shape(image, shape, dims, color)\n return image\n\n def image_reference(self, image_id):\n \"\"\"Return the shapes data of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"shapes\":\n return info[\"shapes\"]\n else:\n super(self.__class__).image_reference(self, image_id)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for shapes of the given image ID.\n \"\"\"\n info = self.image_info[image_id]\n shapes = info['shapes']\n count = len(shapes)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, (shape, _, dims) in enumerate(info['shapes']):\n mask[:, :, i:i + 1] = self.draw_shape(mask[:, :, i:i + 1].copy(),\n shape, dims, 1)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(\n occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s[0]) for s in shapes])\n return mask, class_ids.astype(np.int32)\n\n def draw_shape(self, image, shape, dims, color):\n \"\"\"Draws a shape from the given specs.\"\"\"\n # Get the center x, y and the size s\n x, y, s = dims\n if shape == 'square':\n image = cv2.rectangle(image, (x - s, y - s),\n (x + s, y + s), color, -1)\n elif shape == \"circle\":\n image = cv2.circle(image, (x, y), s, color, -1)\n elif shape == \"triangle\":\n points = np.array([[(x, y - s),\n (x - s / math.sin(math.radians(60)), y + s),\n (x + s / math.sin(math.radians(60)), y + s),\n ]], dtype=np.int32)\n image = cv2.fillPoly(image, points, color)\n return image\n\n def random_shape(self, height, width):\n \"\"\"Generates specifications of a random shape that lies within\n the given height and width boundaries.\n Returns a tuple of three valus:\n * The shape name (square, circle, ...)\n * Shape color: a tuple of 3 values, RGB.\n * Shape dimensions: A tuple of values that define the shape size\n and location. 
Differs per shape type.\n \"\"\"\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height // 4)\n return shape, color, (x, y, s)\n\n def random_image(self, height, width):\n \"\"\"Creates random specifications of an image with multiple shapes.\n Returns the background color of the image and a list of shape\n specifications that can be used to draw the image.\n \"\"\"\n # Pick random background color\n bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n # Generate a few random shapes and record their\n # bounding boxes\n shapes = []\n boxes = []\n N = random.randint(1, 8)\n for _ in range(N):\n shape, color, dims = self.random_shape(height, width)\n shapes.append((shape, color, dims))\n x, y, s = dims\n boxes.append([y - s, x - s, y + s, x + s])\n # Apply non-max suppression wit 0.3 threshold to avoid\n # shapes covering each other\n keep_ixs = utils.non_max_suppression(\n np.array(boxes), np.arange(N), 0.4)\n shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n return bg_color, shapes\n\n\ndef get_ax(rows=1, cols=1, size=8):\n \"\"\"Return a Matplotlib Axes array to be used in\n all visualizations in the notebook. Provide a\n central point to control graph sizes.\n\n Change the default size attribute to control the size\n of rendered images\n \"\"\"\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax\n\n\ndef main():\n config = ShapesConfig()\n config.display()\n\n # Training dataset\n dataset_train = ShapesDataset()\n dataset_train.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])\n dataset_train.prepare()\n\n # Validation dataset\n dataset_val = ShapesDataset()\n dataset_val.load_shapes(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])\n dataset_val.prepare()\n\n # Create model in training mode\n model = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=MODEL_DIR)\n\n # Which weights to start with?\n init_with = \"coco\" # imagenet, coco, or last\n\n if init_with == \"imagenet\":\n model.load_weights(model.get_imagenet_weights(), by_name=True)\n elif init_with == \"coco\":\n # Load weights trained on MS COCO, but skip layers that\n # are different due to the different number of classes\n # See README for instructions to download the COCO weights\n model.load_weights(COCO_MODEL_PATH, by_name=True,\n exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n elif init_with == \"last\":\n # Load the last model you trained and continue training\n model.load_weights(model.find_last(), by_name=True)\n\n # Train the head branches\n # Passing layers=\"heads\" freezes all layers except the head\n # layers. You can also pass a regular expression to select\n # which layers to train by name pattern.\n model.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=10,\n layers='all')\n\n # Fine tune all layers\n # Passing layers=\"all\" trains all layers. 
You can also\n # pass a regular expression to select which layers to\n # train by name pattern.\n #model.train(dataset_train, dataset_val,\n # learning_rate=config.LEARNING_RATE / 10,\n # epochs=2,\n # layers=\"all\")\n\n #model_path = os.path.join(MODEL_DIR, \"mask_rcnn_shapes_tf\")\n #model.keras_model.save_weights(model_path, save_format='tf')\n model_path = model.find_last()\n\n class InferenceConfig(ShapesConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n DETECTION_MIN_CONFIDENCE = 0.5\n\n inference_config = InferenceConfig()\n\n # Recreate the model in inference mode\n model = modellib.MaskRCNN(mode=\"inference\",\n config=inference_config,\n model_dir=MODEL_DIR)\n\n # Get path to saved weights\n # Either set a specific path or find last trained weights\n #model_path = os.path.join(ROOT_DIR, \"mask_rcnn_shapes\")\n # model_path = model.find_last()\n\n # Load trained weights\n print(\"Loading weights from \", model_path)\n model.load_weights(model_path, by_name=True)\n\n confidence_thresholds = np.linspace(0.1, 1, 15)\n all_tp_rates = []\n all_fp_rates = []\n\n # Compute ROCs for above range of thresholds\n # Compute one for each class vs. the other classes\n for index, conf in enumerate(confidence_thresholds):\n tp_of_img = []\n fp_of_img = []\n all_classes = []\n\n tp_rates = {}\n fp_rates = {}\n\n print('Creating model with confidence threshold: {}'.format(conf))\n inference_config.DETECTION_MIN_CONFIDENCE = conf\n\n # Recreate the model in inference mode\n model = modellib.MaskRCNN(mode=\"inference\",\n config=inference_config,\n model_dir=MODEL_DIR)\n\n # Load trained weights\n model.load_weights(model_path, by_name=True)\n\n image_ids = np.random.choice(dataset_val.image_ids, 10)\n for image_id in image_ids:\n # Load image and ground truth data\n image, image_meta, gt_class_id, gt_bbox, gt_mask = \\\n modellib.load_image_gt(dataset_val, config,\n image_id)\n molded_images = np.expand_dims(modellib.mold_image(image, config), 0)\n\n #print('OG Image')\n #visualize.display_instances(image, gt_bbox, gt_mask, gt_class_id, dataset_val.class_names, figsize=(8, 8))\n\n # Run object detection\n results = model.detect([image], verbose=0)\n r = results[0]\n # Detect returns:\n # \"rois\" []\n # \"class_ids\" [N]\n # \"scores\" [N]\n\n #print('Pred Image')\n #visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], dataset_val.class_names,\n # r['scores'], figsize=(8, 8))\n\n classes = list(set(r['class_ids'])) # All unique class ids\n for c in classes:\n if c not in all_classes:\n all_classes.append(c)\n\n complete_classes = dataset_val.class_ids[1:]\n\n # Need TPR and FPR rates for each class versus the other classes\n # Recall == TPR\n tpr = utils.compute_ap_indiv_class(gt_bbox, gt_class_id, gt_mask,\n r[\"rois\"], r[\"class_ids\"], r[\"scores\"], r['masks'], complete_classes)\n total_fpr = utils.compute_fpr_indiv_class(gt_bbox, gt_class_id, gt_mask,\n r[\"rois\"], r[\"class_ids\"], r[\"scores\"], r['masks'],\n complete_classes)\n\n # print(f'For Image: TPR: {tpr} -- FPR: {total_fpr}')\n\n tp_of_img.append(tpr)\n fp_of_img.append(total_fpr)\n\n all_classes = dataset_val.class_ids[1:]\n\n # Need to get average TPR and FPR for number of images used\n for c in all_classes:\n tp_s = 0\n for item in tp_of_img:\n if c in item.keys():\n tp_s += item[c]\n else:\n tp_s += 0\n\n tp_rates[c] = tp_s / len(image_ids)\n # tp_rates[c] = tp_s\n\n # print(tp_rates)\n\n for c in all_classes:\n fp_s = 0\n for item in fp_of_img:\n if c in item.keys():\n fp_s += item[c]\n else:\n fp_s += 0\n 
fp_rates[c] = fp_s / len(image_ids)\n # fp_rates[c] = fp_s\n\n all_fp_rates.append(fp_rates)\n all_tp_rates.append(tp_rates)\n\n\n print(f'TP Rates: {all_tp_rates}')\n print(f'FP Rates: {all_fp_rates}')\n\n # Plot ROC curves\n utils.compute_roc_curve(all_tp_rates, all_fp_rates, save_fig=True)\n\n\n\nif __name__ == '__main__':\n main()"
] |
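The random_image generator in the file above thins overlapping shapes with utils.non_max_suppression(np.array(boxes), np.arange(N), 0.4), passing np.arange(N) as surrogate scores so later shapes win ties. That helper is project code from the Mask R-CNN utils module; the following is only a minimal greedy IoU-suppression sketch with the same (boxes, scores, threshold) interface, not the library's actual implementation:

import numpy as np

def nms_sketch(boxes, scores, threshold):
    # Greedy NMS: keep the highest-scoring box, drop candidates whose
    # IoU with it exceeds `threshold`, then repeat on the remainder.
    # boxes is [N, (y1, x1, y2, x2)]; returns indices of kept boxes.
    y1, x1, y2, x2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (y2 - y1) * (x2 - x1)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of box i with every remaining candidate
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        inter = np.maximum(0, yy2 - yy1) * np.maximum(0, xx2 - xx1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= threshold]
    return np.array(keep)

# Two near-duplicate boxes plus one distinct box: the lower-scored
# duplicate (index 0) is suppressed at the 0.4 threshold.
boxes = np.array([[0, 0, 10, 10], [1, 1, 10, 10], [20, 20, 30, 30]])
print(nms_sketch(boxes, np.arange(3), 0.4))  # -> [2 1]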
[
[
"numpy.logical_not",
"numpy.linspace",
"numpy.random.choice",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.ones",
"numpy.array",
"numpy.zeros"
]
] |
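main() above collects one {class_id: rate} dict per confidence threshold into all_tp_rates and all_fp_rates before handing both lists to utils.compute_roc_curve. That plotting helper is also project-specific; assuming every dict carries the same class ids, an equivalent per-class ROC plot could look like this sketch:

import matplotlib.pyplot as plt

def plot_roc_sketch(all_tp_rates, all_fp_rates, save_fig=False):
    # One curve per class, built from parallel lists of
    # {class_id: rate} dicts (one dict per confidence threshold).
    for c in sorted(all_tp_rates[0]):
        fpr = [d[c] for d in all_fp_rates]
        tpr = [d[c] for d in all_tp_rates]
        plt.plot(fpr, tpr, marker='o', label=f'class {c}')
    plt.plot([0, 1], [0, 1], linestyle='--', label='chance')  # diagonal reference
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.legend()
    if save_fig:
        plt.savefig('roc_curves.png')
    plt.show()

# e.g. two thresholds, two classes:
plot_roc_sketch([{1: 0.9, 2: 0.8}, {1: 0.6, 2: 0.5}],
                [{1: 0.4, 2: 0.3}, {1: 0.1, 2: 0.2}])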
PaliC/pytorch
|
[
"d1a5612a3ea501b83a80b9ecfd6b4dd5eb125181"
] |
[
"test/test_jit_fuser_te.py"
] |
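The test file below exercises TorchScript's TensorExpr fuser through the profiling executor, which only specializes and fuses a graph after a few profiled runs; that is why its warmup_forward helper calls the function repeatedly before any graph assertion. A minimal sketch of the warm-up pattern using only public torch.jit APIs (whether a prim::TensorExprGroup actually appears depends on the build and fuser flags):

import torch

@torch.jit.script
def fused_candidate(a, b):
    # An elementwise chain the TensorExpr fuser can turn into one kernel
    return (a + b) * 2.0

x = torch.ones(4)
y = torch.ones(4)
for _ in range(3):
    # Warm-up runs let the profiling executor record shapes/dtypes
    # and swap in the specialized, possibly fused, graph.
    fused_candidate(x, y)

# Inspect the graph that actually ran on the last call
print(torch.jit.last_executed_optimized_graph())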
[
"import operator\nimport unittest\nimport contextlib\nimport math\nimport torch\nimport torch.nn.functional as F\nfrom torch.testing import FileCheck\nfrom typing import List\n\n# these needs to be set before `common_utils`\n# infers `GRAPH_EXECUTOR`.\n# this file **requires** these settings\n# and setting them after `GRAPH_EXECUTOR` is\n# inferred erroneously runs or skips\n# some tests\ntorch._C._jit_set_profiling_executor(True)\ntorch._C._jit_set_profiling_mode(True)\n\nfrom torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, \\\n enable_profiling_mode_for_profiling_tests, TestCase\nfrom torch.testing._internal.jit_utils import JitTestCase, \\\n RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining\n\nfrom torch.testing._internal.common_methods_invocations import op_db\nfrom torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests\n\nfrom textwrap import dedent\nfrom itertools import product, permutations\n\nfrom test_jit import backward_graph, get_lstm_inputs, get_milstm_inputs, \\\n LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell\n\nfrom jit.test_fuser_common import TestFuserCommon # noqa: F401\n\nFUSION_GROUP = 'prim::TensorExprGroup'\nLLVM_ENABLED = torch._C._llvm_enabled()\n\ndef strip_profiling_nodes(nodes):\n profiling_opcodes = set(['prim::BailoutTemplate', 'prim::BailOut'])\n return [n for n in nodes if n.kind() not in profiling_opcodes]\n\ndef warmup_forward(f, *args, profiling_count=2):\n for i in range(profiling_count):\n results = f(*args)\n\n return results\n\[email protected]\ndef texpr_reductions_enabled():\n old = torch._C._jit_set_texpr_reductions_enabled(True)\n try:\n yield\n finally:\n torch._C._jit_set_texpr_reductions_enabled(old)\n\[email protected]\ndef inline_fusion_groups():\n old_inlining = torch._C._debug_get_fusion_group_inlining()\n torch._C._debug_set_fusion_group_inlining(True)\n try:\n yield\n finally:\n torch._C._debug_set_fusion_group_inlining(old_inlining)\n\nclass TestTEFuser(JitTestCase):\n def setUp(self):\n self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()\n self.old_must_use_cpu_state = torch._C._jit_get_te_must_use_llvm_cpu()\n self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()\n\n torch._C._jit_override_can_fuse_on_cpu(True)\n # TODO: force LLVM. 
need to add it to asan, mac, windows builds + sandcastle\n # torch._C._jit_set_te_must_use_llvm_cpu(True)\n torch._C._jit_override_can_fuse_on_gpu(True)\n\n self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)\n self.old_profiling_mode = torch._C._jit_set_profiling_mode(True)\n\n self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()\n torch._C._debug_set_fusion_group_inlining(False)\n\n self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()\n torch._C._jit_set_texpr_fuser_enabled(True)\n\n self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()\n torch._C._jit_set_te_must_use_llvm_cpu(False)\n\n self.devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']\n self.int_dtypes = [\n torch.int8,\n torch.int16,\n torch.int32,\n torch.int64,\n torch.bool,\n ]\n self.fp_dtypes = [\n torch.float16,\n torch.float32,\n torch.float64,\n torch.bfloat16,\n ]\n self.dtypes = self.int_dtypes + self.fp_dtypes\n\n def tearDown(self):\n torch._C._jit_set_profiling_executor(self.old_profiling_executor)\n torch._C._jit_set_profiling_mode(self.old_profiling_mode)\n\n torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state)\n torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)\n torch._C._jit_set_te_must_use_llvm_cpu(self.old_must_use_cpu_state)\n torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)\n\n torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)\n torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)\n\n def assertLastGraphAllFused(self):\n self.assertAllFused(torch.jit.last_executed_optimized_graph())\n\n def findFusionGroups(self, graph):\n result = []\n for n in graph.nodes():\n if n.kind() == FUSION_GROUP:\n result.append(n.g('Subgraph'))\n continue\n for block in n.blocks():\n result += self.findFusionGroups(block)\n return result\n\n def test_typecheck(self):\n a = torch.ones(1)\n\n def fused_kernel(a, b):\n return (a + b) * 2.\n\n scripted = self.checkScript(fused_kernel, (a, a))\n graph = scripted.graph_for(a, a)\n # double check we fused\n fusion_groups = self.findFusionGroups(graph)\n self.assertEqual(len(fusion_groups), 1)\n # we use a bigger tensor now (size 2)\n # if we won't trigger a recompilation\n # we will still create a tensor up to (size 1)\n # if the type check fails\n a = torch.ones(2)\n # shape changed if we don't trigger recompilation\n # we would compute the wrong result silently\n self.assertEqual(scripted(a, a), fused_kernel(a, a))\n\n def test_sum_simple(self):\n def func(x):\n x2 = x * x\n return x2.sum()\n\n with texpr_reductions_enabled():\n a = torch.tensor(list(x for x in range(0, 15)), dtype=torch.float, device='cpu')\n a = a.reshape(5, 3)\n scripted = self.checkScript(func, (a,))\n self.assertLastGraphAllFused()\n\n def test_nop(self):\n pass\n\n def test_sum_dim(self):\n def func(x):\n return x.sum((0, )) * 2\n\n def func_neg(x):\n return x.sum((-2, )) * 2\n\n with texpr_reductions_enabled():\n a = torch.tensor(list(x for x in range(0, 15)), dtype=torch.float, device='cpu')\n a = a.reshape(5, 3)\n scripted = self.checkScript(func, (a,))\n self.assertLastGraphAllFused()\n scripted = self.checkScript(func_neg, (a,))\n self.assertLastGraphAllFused()\n\n def test_sum_keepdim_cast(self):\n def func(x):\n return x.sum((0, ), keepdim=True, dtype=torch.double) * 2\n\n with texpr_reductions_enabled():\n a = torch.tensor(list(x for x in range(0, 15)), dtype=torch.float, device='cpu')\n a = a.reshape(5, 3)\n\n self.checkScript(func, (a,))\n 
self.assertLastGraphAllFused()\n\n def test_abs(self):\n for device in self.devices:\n def func(x):\n return x.abs() * 2\n\n a = torch.randn(5, device=device)\n scripted = self.checkScript(func, (a,))\n self.assertLastGraphAllFused()\n\n def test_unsqueeze_size_calculation(self):\n for device in self.devices:\n def foo(b, d):\n x = d.unsqueeze(1)\n y = x * 42.\n z = b + y\n r = z / 42.\n return r\n\n inputs = (torch.rand(20, 28, device=device, requires_grad=True), torch.rand(20, device=device))\n scripted = self.checkScript(foo, inputs)\n self.assertAllFused(scripted.graph_for(*inputs))\n\n def test_zero_element_tensors(self):\n for device in self.devices:\n def decode(sin_t, cos_t):\n theta = torch.atan2(sin_t.float(), cos_t.float())\n return theta\n\n sin = torch.zeros(0, device=device)\n cos = torch.zeros(0, device=device)\n inputs = [sin, cos]\n ge = self.checkScript(decode, inputs)\n\n def test_arg_configurations_smoke(self):\n # A smoke test to make sure we won't use the same kernel for contiguous\n # and non-contiguous arguments.\n # TODO: add optionally enabled debug counters to the fuser to verify\n # that we really can tell the difference between configurations\n for device in self.devices:\n def f(x, y):\n z1, z2 = (x + y).chunk(2, dim=1)\n return z1 * z2\n\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n y = torch.randn(4, 4, dtype=torch.float, device=device)\n traced_f = torch.jit.trace(f, (x, y,))\n self.assertEqual(traced_f(x.t().contiguous(), y), traced_f(x.t(), y))\n\n def test_broadcast(self):\n for device in self.devices:\n def scaleshift(x, scale, shift):\n return x * scale + shift\n\n inputs = [\n torch.randn(4, 4, dtype=torch.float, device=device),\n torch.randn(4, dtype=torch.float, device=device),\n torch.randn(4, dtype=torch.float, device=device),\n ]\n self.checkScript(scaleshift, inputs)\n\n @unittest.skipIf(not RUN_CUDA, \"fuser requires CUDA\")\n @unittest.skipIf(not RUN_CUDA_HALF, \"no half support\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.LEGACY, \"no half support with profiling on\")\n def test_cuda_half(self):\n x = torch.randn(4, 4, dtype=torch.half, device='cuda')\n y = torch.randn(4, 4, dtype=torch.half, device='cuda')\n\n funcs = [\n self.fn_test_comparison_gt_lt,\n self.fn_test_relu,\n self.fn_test_exp\n ]\n\n # Note: Non fused inputs must be float to prevent loss of precision\n inputs = (x.float(), y.float())\n fusion_inputs = (x, y)\n for fn in funcs:\n local_inputs = [t.clone().requires_grad_() for t in inputs]\n local_fusion_inputs = [t.clone().requires_grad_() for t in fusion_inputs]\n\n # Verifies outputs\n fusion = torch.jit.trace(fn, local_fusion_inputs, check_trace=False)\n outputs = fn(*local_inputs)\n fusion_outputs = fusion(*local_fusion_inputs)\n outputs_half = [t.half() for t in outputs]\n self.assertEqual(outputs_half, fusion_outputs)\n\n # Verifies gradients\n for output, fusion_output in zip(outputs_half, fusion_outputs):\n grads = torch.autograd.grad(\n output.float().sum(), local_inputs, allow_unused=True, retain_graph=True)\n fusion_grads = torch.autograd.grad(\n fusion_output.sum(), local_fusion_inputs, allow_unused=True, retain_graph=True)\n grads_half = [t.half() for t in grads]\n self.assertEqual(grads_half, fusion_grads)\n\n def test_checks_cat_inputs(self):\n # single fusion node causes error\n with set_fusion_group_inlining(True):\n for device in self.devices:\n # We shouldn't treat cat nodes as broadcasting. 
All their inputs\n # need to be checked for having the same map size, before we can\n # run the kernel.\n def f(x, y):\n return torch.cat([x + 2 * x + x ** 2, y + 4 * y + y ** 3], dim=0)\n\n # NOTE: y is broadcastable to x, but output of f(x, y) should have\n # shape 3x4, and not 4x4.\n x = torch.randn(2, 4, dtype=torch.float, device=device)\n y = torch.randn(1, 4, dtype=torch.float, device=device)\n\n scripted = self.checkScript(f, (x, y))\n self.assertEqual(scripted(x, y).shape, (3, 4))\n self.assertAllFused(scripted.graph_for(x, y))\n\n def test_chunk(self):\n for device in self.devices:\n def fn(x):\n a, b, c = x.chunk(3, 1)\n return a * b + c\n\n inputs = [torch.randn(10, 6, dtype=torch.float, device=device)]\n\n self.checkScript(fn, inputs)\n self.assertLastGraphAllFused()\n\n def test_chunk_correctness(self):\n for device in self.devices:\n def chunk_4_0(x):\n x0, x1, x2, x3 = x.chunk(4, 0)\n return x0 + x1 + x2 + x3\n\n def chunk_4_1(x):\n x0, x1, x2, x3 = x.chunk(4, 1)\n return x0 + x1 + x2 + x3\n\n def chunk_4_last(x):\n x0, x1, x2, x3 = x.chunk(4, 2)\n return x0 + x1 + x2 + x3\n\n fns = [chunk_4_0, chunk_4_1, chunk_4_last]\n tensors = [\n # splitSize = 1\n torch.randn(4, 4, 4, dtype=torch.float, device=device),\n\n # contiguous case\n torch.randn(12, 8, 16, dtype=torch.float, device=device),\n\n # non-contiguous case\n torch.randn(12, 8, 16, dtype=torch.float, device=device).transpose(1, 2),\n ]\n\n for tensor in tensors:\n for fn in fns:\n self.checkScript(fn, [tensor])\n self.assertLastGraphAllFused()\n\n def test_chunk_distributes(self):\n for device in self.devices:\n def f(x, y):\n z1, z2 = (x + y).chunk(2, dim=1)\n return z1 * z2\n\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n y = torch.randn(4, 4, dtype=torch.float, device=device)\n\n ge = self.checkTrace(f, (x, y))\n graph = ge.graph_for(x, y)\n # XXX: The old fuser does broadcast_tensors but the new fuser doesn't.\n # FileCheck().check(\"broadcast_tensors\").check('with ' + FUSION_GROUP + '_') \\\n # .check_count('ConstantChunk', 2, exactly=True).run(str(graph))\n FileCheck().check(\"with \" + FUSION_GROUP + \"_\").check_count(\n \"ConstantChunk\", 1, exactly=True\n ).run(str(graph))\n\n def test_chunk_motion_deduplicates_inputs(self):\n for device in self.devices:\n def func1(x):\n z = x * x\n z0, z1 = z.chunk(2)\n return z0 * z1\n\n def func2(x):\n z = x * x * x\n z0, z1 = z.chunk(2)\n return z0 * z1\n\n inputs = [\n torch.tensor([1.1, 1.2], device=device, dtype=torch.float),\n ]\n for func in [func1, func2]:\n self.checkScript(func, inputs)\n self.assertLastGraphAllFused()\n\n def test_chunk_multiple(self):\n for device in self.devices:\n # The arguments are intentionally used out of order as a test to see\n # if the fusion compiler adds extra args in the correct order\n def fn(s, x, y, z):\n z1, z2 = z.chunk(2, 2)\n x1, x2, x3 = x.chunk(3, 1)\n y1, y2 = y.chunk(2, 0)\n return s + x1 + x2 + x3 + y1 + y2 + z1 + z2\n\n inputs = [\n torch.randn(5, 2, 3, dtype=torch.float, device=device),\n torch.randn(5, 6, 3, dtype=torch.float, device=device),\n torch.randn(10, 2, 3, dtype=torch.float, device=device),\n torch.randn(5, 2, 6, dtype=torch.float, device=device),\n ]\n\n ge = self.checkScript(fn, inputs)\n self.assertAllFused(ge.graph_for(*inputs))\n\n def test_minmax(self):\n for device in self.devices:\n def tmax(a, b):\n return torch.max(2 * a, b)\n\n def tmin(a, b):\n return torch.min(2 * a, b)\n\n a = torch.randn(4, 4, dtype=torch.float)\n b = torch.randn(4, 4, dtype=torch.float)\n nan = 
torch.tensor(float('nan'), dtype=torch.float)\n\n for f, inputs, device in product(\n (tmax, tmin),\n ([a, b], [a, nan], [b, nan]),\n self.devices):\n inputs = [t.to(device) for t in inputs]\n s = self.checkScript(f, inputs)\n self.assertAllFused(s.graph_for(*inputs))\n\n def test_clamp(self):\n for device in self.devices:\n def func2(a, b):\n return torch.clamp(a + b, min=0, max=2)\n\n def funcInf(a, b):\n return torch.clamp(a + b, min=0, max=float('inf'))\n\n def funcNegInf(a, b):\n return torch.clamp(a + b, min=float('-inf'), max=0)\n\n def funcOptMin(a, b):\n return torch.clamp(a + b, max=2)\n\n def funcOptMax(a, b):\n return torch.clamp(a + b, min=0)\n\n a = torch.randn(4, 4, dtype=torch.float, device=device, requires_grad=True)\n b = torch.randn(4, 4, dtype=torch.float, device=device)\n nan = torch.tensor(float('nan'), dtype=torch.float, device=device)\n\n funcs = (func2, funcInf, funcNegInf, funcOptMin, funcOptMax)\n for f, inputs in product(funcs, [[a, b], [a, nan]]):\n inp1, inp2 = inputs\n s = self.checkScript(f, (inp1, inp2), profiling=ProfilingMode.PROFILING)\n self.assertAllFused(s.graph_for(inp1, inp2), except_for={'aten::size', 'aten::_size_if_not_equal'})\n c = s(inp1, inp2)\n with enable_profiling_mode_for_profiling_tests():\n warmup_backward(c.sum())\n graph = backward_graph(s)\n self.assertAllFused(graph, except_for={'aten::Float', 'aten::_grad_sum_to_size'})\n\n def test_clamp_double(self):\n for device in self.devices:\n def clamp_double(x, eta: float):\n return 1 - x.clamp(eta, 1 - eta)\n\n x = torch.tensor([1.0, 1.0], dtype=torch.double, device=device)\n eta = 1e-9\n s = self.checkScript(clamp_double, (x, eta), profiling=ProfilingMode.PROFILING, atol=1e-10, rtol=1e-5)\n self.assertAllFused(s.graph_for(x, eta))\n\n def test_clamp_int(self):\n for device in self.devices:\n def clamp_int(x, eta: int):\n return x.clamp(0, eta)\n\n x = torch.tensor([1, 1], device=device)\n eta = 1 << 32\n s = self.checkScript(clamp_int, (x, eta), profiling=ProfilingMode.PROFILING)\n self.assertAllFused(s.graph_for(x, eta))\n\n def test_add_bool(self):\n sizes = [(1,), (2,), (4, 4)]\n for device, size in product(self.devices, sizes):\n def f(x, y, z):\n return x + y + z\n\n x = torch.randint(0, 2, size, dtype=torch.bool, device=device)\n y = torch.randint(0, 2, size, dtype=torch.bool, device=device)\n z = torch.randint(0, 2, size, dtype=torch.bool, device=device)\n ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)\n self.assertAllFused(ge.graph_for(x, y, z))\n\n def test_mul_bool(self):\n for device in self.devices:\n def f(x, y, z):\n return x * y * z\n\n x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)\n y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)\n z = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)\n\n ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)\n self.assertAllFused(ge.graph_for(x, y, z))\n\n def test_div_bool(self):\n for device in self.devices:\n def f(x, y, z):\n return (x + y) / z\n\n x = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)\n y = torch.randint(0, 2, (4, 4), dtype=torch.bool, device=device)\n z = torch.ones_like(x, dtype=torch.bool, device=device)\n\n ge = self.checkTrace(f, (x, y, z), inputs_require_grads=False)\n self.assertAllFused(ge.graph_for(x, y, z))\n\n def test_bitwise_ops(self):\n def apply(fn):\n return lambda x, y, z: fn(fn(x, y), z)\n\n binary_ops = [\n operator.__and__,\n operator.__or__,\n operator.__xor__,\n operator.__lshift__,\n operator.__rshift__,\n ]\n 
devices = self.devices\n for dtype, op, device in product(self.int_dtypes, binary_ops, devices):\n try:\n x = self.data_for(dtype, device)\n y = self.data_for(dtype, device)\n z = self.data_for(dtype, device)\n fn = apply(op)\n ref = fn(x, y, z)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x, y, z))\n self.assertEqual(ref, t(x, y, z))\n self.assertAllFused(t.graph_for(x, y, z))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n def test_minmax_int_ops(self):\n def apply(fn):\n return lambda x, y, z: fn(fn(x, y), z)\n\n binary_ops = [\n torch.min,\n torch.max\n ]\n devices = self.devices\n for dtype, op, device in product(self.int_dtypes, binary_ops, devices):\n try:\n x = self.data_for(dtype, device)\n y = self.data_for(dtype, device)\n z = self.data_for(dtype, device)\n fn = apply(op)\n ref = fn(x, y, z)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x, y, z))\n self.assertEqual(ref, t(x, y, z))\n self.assertAllFused(t.graph_for(x, y, z))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n def test_comparison_eq_ne(self):\n for device in self.devices:\n def f(x, y):\n mask = (x == 0).type_as(x)\n z = x * mask + y\n mask = (x != 0).type_as(x)\n z = z * mask + y\n return z\n\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n y = torch.randn(4, 4, dtype=torch.float, device=device)\n\n ge = self.checkTrace(f, (x, y))\n self.assertAllFused(ge.graph_for(x, y))\n\n @staticmethod\n def fn_test_comparison_gt_lt(x, y):\n mask = (x > 0).type_as(x)\n z = x * mask + y\n mask = (x < 0).type_as(x)\n z = z * mask + y\n return z\n\n def test_comparison_gt_lt(self):\n for device in self.devices:\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n y = torch.randn(4, 4, dtype=torch.float, device=device)\n\n ge = self.checkTrace(self.fn_test_comparison_gt_lt, (x, y))\n self.assertAllFused(ge.graph_for(x, y))\n\n def test_comparison_ge_le(self):\n for device in self.devices:\n def f(x, y):\n mask = (x >= 0).type_as(x)\n z = x * mask + y\n mask = (x <= 0).type_as(x)\n z = z * mask + y\n return z\n\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n y = torch.randn(4, 4, dtype=torch.float, device=device)\n\n ge = self.checkTrace(f, (x, y))\n self.assertAllFused(ge.graph_for(x, y))\n x.requires_grad_(True)\n y.requires_grad_(True)\n self.assertAllFused(ge.graph_for(x, y), except_for=(\"aten::size\", \"prim::BroadcastSizes\",\n \"aten::_size_if_not_equal\"))\n\n def test_addcmul(self):\n for device in self.devices:\n t = torch.randn(1, 4, dtype=torch.float, device=device)\n t1 = torch.randn(4, 1, dtype=torch.float, device=device)\n t2 = torch.randn(1, 4, dtype=torch.float, device=device)\n\n def foo(t, t1, t2):\n return t.addcmul(t + 1, t2, value=0.1)\n\n ge = self.checkTrace(foo, (t, t1, t2), allow_unused=True)\n graph = ge.graph_for(t, t1, t2)\n fusion_groups = self.findFusionGroups(graph)\n self.assertEqual(len(fusion_groups), 1)\n FileCheck().check(\"aten::add(\").check(\"aten::addcmul(\").run(str(fusion_groups[0]))\n\n # TODO: We leak CUDA memory here because the traced graph holds 
onto a\n # constant-ified tensor. Since the Python-global CompilationUnit is alive\n # until the end of the process, the memory is effectively leaked.\n # Removed `_cuda` suffix from this test which disables leak-checking.\n # If this is a real problem, we'll need to revisit Torchscript Function\n # lifetimes in Python.\n def test_lerp(self):\n for device in self.devices:\n start = torch.randn(4, 1, dtype=torch.float, device=device)\n end = torch.randn(1, 4, dtype=torch.float, device=device)\n weight = torch.tensor(0.5, dtype=torch.float, device=device)\n\n # scalar weight overload\n def foo_weight_scalar(start, end):\n return torch.lerp(start + 1, end, 0.5)\n\n # tensor weight overload\n def foo_weight_tensor(start, end):\n return torch.lerp(start + 1, end, weight)\n\n ge_weight_scalar = self.checkTrace(foo_weight_scalar, (start, end))\n graph = ge_weight_scalar.graph_for(start, end)\n self.assertAllFused(graph)\n\n # TODO: uncomment when TE enables support for scalar tensors\n # ge_weight_tensor = self.checkTrace(foo_weight_tensor, (start, end))\n # graph = ge_weight_tensor.graph_for(start, end)\n # self.assertAllFused(graph)\n\n def test_concat(self):\n # disabling concat causes error with single concat node\n with set_fusion_group_inlining(True):\n for device in self.devices:\n hx = torch.randn(3, 20, dtype=torch.float, device=device)\n cx = torch.randn(3, 20, dtype=torch.float, device=device)\n\n def foo(hx, cx):\n return torch.cat((hx + cx, hx * cx))\n\n ge = self.checkTrace(foo, (hx, cx))\n graph = ge.graph_for(hx, cx)\n self.assertAllFused(graph)\n # XXX: TE fuser can handle concats in a fusion group.\n # FileCheck().check(\"FusedConcat\").check_next(\"return\").run(str(graph))\n\n def test_remove_output_used_only_in_size(self):\n for device in self.devices:\n def test_fuse(a, b):\n c = a + b\n d = c + b\n return d\n\n scripted_f = torch.jit.script(test_fuse)\n x = torch.ones(1, requires_grad=True, device=device)\n y = torch.ones(1, requires_grad=True, device=device)\n warmup_forward(scripted_f, x, y, profiling_count=3)\n g = scripted_f.graph_for(x, y)\n diff_nodes = g.findAllNodes('prim::DifferentiableGraph')\n self.assertEqual(len(diff_nodes), 1)\n g = diff_nodes[0].g('Subgraph')\n if_nodes = [n for n in g.nodes() if n.kind() == 'prim::If']\n self.assertEqual(len(if_nodes), 1)\n # the if node and the fusion group inside it should only have one output\n self.assertEqual(len(list(if_nodes[0].outputs())), 1)\n\n def test_concat_invariant(self):\n for device in self.devices:\n # Invariant: the output of prim::FusedConcat may\n # not be an input to any node inside the FusionGroup.\n def fn(x, y, z):\n x1 = x + y\n y1 = x - y\n w = torch.cat([x1, y1])\n return w + z\n\n x = torch.randn(2, 2, dtype=torch.float, device=device)\n y = torch.randn(2, 2, dtype=torch.float, device=device)\n z = torch.randn(4, 2, dtype=torch.float, device=device)\n ge = self.checkTrace(fn, (x, y, z))\n graph = ge.graph_for(x, y, z)\n self.assertAllFused(graph, except_for={'aten::add'})\n # XXX: TE fuser can handle concats inside a fusion group.\n # FileCheck().check(\"FusedConcat\").check_next(\"return\").run(str(graph))\n\n @staticmethod\n def fn_test_exp(x, y):\n return (x + .5 * y).exp()\n\n def test_exp(self):\n for device in self.devices:\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n y = torch.randn(4, 4, dtype=torch.float, device=device)\n\n ge = self.checkTrace(self.fn_test_exp, (x, y))\n self.assertAllFused(ge.graph_for(x, y))\n\n def test_threshold(self):\n for device in self.devices:\n 
def f(x):\n return torch.threshold(x, 0, -10) + x + x + x\n\n x = torch.tensor([-1, -0.5, 0, 1, 2, 3], device=device)\n scripted = self.checkScript(f, (x,))\n self.assertAllFused(scripted.graph_for(x))\n\n def test_scalar_arg(self):\n for device in self.devices:\n def fn_test_scalar_arg(x: torch.Tensor, p: float) -> torch.Tensor:\n return p * (x * x + x)\n\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n p = 3\n scripted = self.checkScript(fn_test_scalar_arg, (x, p))\n self.assertAllFused(scripted.graph_for(x, p))\n\n x.requires_grad_(True)\n\n # use another function otherwise we will bailout\n # and won't be able to do fused checks\n def fn_test_scalar_arg_requires_grad(x: torch.Tensor, p: float) -> torch.Tensor:\n return p * (x * x + x)\n\n scripted = torch.jit.script(fn_test_scalar_arg_requires_grad)\n out = scripted(x, p)\n out = scripted(x, p)\n out = scripted(x, p)\n self.assertAllFused(scripted.graph_for(x, p), except_for=(\"aten::size\", \"prim::BroadcastSizes\",\n \"aten::_size_if_not_equal\"))\n\n @unittest.skipIf(not RUN_CUDA, \"fuser requires CUDA\")\n @unittest.skipIf(not RUN_CUDA_MULTI_GPU, \"needs non-zero device\")\n def test_fusion_reuse_multi_gpu(self):\n def fn(x, y):\n return x * y * x * y\n\n inputs_cpu = [\n torch.randn(4, 4, dtype=torch.float),\n torch.randn(4, 4, dtype=torch.float),\n ]\n inputs_cuda0 = [x.cuda(0) for x in inputs_cpu]\n inputs_cuda1 = [y.cuda(1) for y in inputs_cpu]\n\n # Should not crash; these should compile different kernels.\n ge = self.checkScript(fn, inputs_cpu)\n self.assertAllFused(ge.graph_for(*inputs_cpu))\n ge(*inputs_cuda0)\n ge(*inputs_cuda1)\n\n # TODO: we're currently not checking 'device' in the type info when pulling\n # nodes into a fusion group. We should fix that and re-enable this test.\n @unittest.skipIf(not RUN_CUDA, \"fuser requires CUDA\")\n @unittest.skipIf(not RUN_CUDA_MULTI_GPU, \"needs non-zero device\")\n def test_kernel_cache_multi_gpu(self):\n def not_fusible(x):\n return x\n\n def fn(x, y, z):\n x_out = x * x * x * x * x # fusion: lambda x. x * x * x * x * x\n y_out = y * y * y * y * y\n z_out = z * z * z * z * z\n return not_fusible(x_out), not_fusible(y_out), not_fusible(z_out)\n\n inputs = [\n torch.randn(4, 4, dtype=torch.float),\n torch.randn(4, 4, dtype=torch.float, device='cuda:0'),\n torch.randn(4, 4, dtype=torch.float, device='cuda:1'),\n ]\n\n prev_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()\n\n # There are 3 FusionGroups. 
Because they have the same graph, they\n # should reuse the same KernelSpec in the KernelSpec cache.\n ge = self.checkScript(fn, inputs)\n self.assertGraphContainsExactly(\n ge.graph_for(*inputs), FUSION_GROUP, 3, True)\n new_cache_size = torch._C._jit_debug_fuser_num_cached_kernel_specs()\n # XXX: This assumes that the same kernel isn't already used by another test\n # FIXME: Use the TE fuser's way of querying the cache.\n # self.assertEqual(new_cache_size - prev_cache_size, 1)\n\n @unittest.skipIf(not RUN_CUDA_MULTI_GPU, \"needs non-zero device\")\n def test_nonzero_device_cuda(self):\n device = 'cuda:' + str(1)\n x = torch.tensor([0.4], dtype=torch.float, device=device)\n y = torch.tensor([0.7], dtype=torch.float, device=device)\n\n def doit(x, y):\n return torch.sigmoid(torch.tanh(x * (x + y) + x))\n\n ge = self.checkTrace(doit, (x, y))\n self.assertAllFused(ge.graph_for(x, y))\n\n def test_lstm(self):\n for device in self.devices:\n inputs = get_lstm_inputs(device, training=True)\n module = self.checkScript(LSTMCellS, inputs)\n self.assertAllFused(module.graph_for(inputs))\n\n def test_lstm_concat(self):\n # single fusion node causes error\n with set_fusion_group_inlining(True):\n for device in self.devices:\n inputs = get_lstm_inputs(device)\n ge = self.checkTrace(LSTMCellC, inputs)\n graph = ge.graph_for(*inputs)\n self.assertAllFused(ge.graph_for(*inputs))\n # XXX: TE fuser can handle concats inside a fusion group.\n # FileCheck().check(\"FusedConcat\").check_next(\"return\").run(str(graph))\n\n def test_lstm_gates_permutations(self):\n for device in self.devices:\n # lstm has gates = x.mm(w_ih.t()) + hx.mm(w_hh.t()) + b_ih + b_hh.\n # Test that any permutation of this will still result in one FusionGroup.\n choices = ['x.mm(w_ih.t())', 'hx.mm(w_hh.t())', 'b_ih', 'b_hh']\n template = dedent('''\n def cell(x, hx, cx, w_ih, w_hh, b_ih, b_hh):\n gates = {} + {} + {} + {}\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n return ingate * forgetgate * cellgate * outgate\n ''')\n for permutation in permutations(choices, len(choices)):\n code = template.format(*permutation)\n scope = {}\n exec(code, globals(), scope)\n cu = torch.jit.CompilationUnit(code)\n\n inputs = get_lstm_inputs(device, training=False)\n self.assertEqual(cu.cell(*inputs), scope['cell'](*inputs))\n forward_graph = cu.cell.graph_for(*inputs)\n self.assertGraphContainsExactly(forward_graph, FUSION_GROUP, 1)\n\n # TODO: Fuser doesn't work at all when inputs require grad. 
Fix that\n def test_lstm_traced(self):\n for device in self.devices:\n inputs = get_lstm_inputs(device)\n ge = self.checkTrace(LSTMCellF, inputs)\n graph = ge.graph_for(*inputs)\n fusion_groups = self.findFusionGroups(graph)\n self.assertEqual(len(fusion_groups), 1)\n FileCheck().check(\"Chunk\").check(\"aten::sigmoid\").check(\"aten::tanh\").run(str(fusion_groups[0]))\n\n def test_milstm(self):\n for device in self.devices:\n inputs = get_milstm_inputs(device, training=True)\n module = self.checkScript(MiLSTMCell, inputs)\n forward_graph = module.graph_for(*inputs)\n self.assertGraphContainsExactly(\n forward_graph, FUSION_GROUP, 1, consider_subgraphs=True)\n FileCheck().check(\"DifferentiableGraph\").check(\"TupleConstruct\") \\\n .check_next(\"return\").check(FUSION_GROUP).run(str(forward_graph))\n hy, cy = module(*inputs)\n warmup_backward((hy + cy).sum())\n\n @unittest.skipIf(not RUN_CUDA, \"fuser requires CUDA\")\n @unittest.skip(\"rand_like is not supported yet\")\n def test_rand_cuda(self):\n class M(torch.jit.ScriptModule):\n __constants__ = ['d']\n\n def __init__(self):\n super(M, self).__init__()\n self.d = torch.device('cuda')\n\n @torch.jit.script_method\n def create(self, x):\n return x * x + x + torch.rand_like(x)\n\n x = torch.zeros([3, 4, 5], dtype=torch.float, device='cuda')\n m = M()\n out1 = m.create(x)\n out2 = m.create(x)\n self.assertNotEqual(out1, out2)\n self.assertTrue(torch.all(out1 >= 0))\n self.assertTrue(torch.all(out1 < 1))\n self.assertTrue(torch.all(out2 >= 0))\n self.assertTrue(torch.all(out2 < 1))\n self.assertAllFused(m.create.graph_for(x))\n\n @staticmethod\n def fn_test_relu(x, y):\n return F.relu(x + .5 * y)\n\n def test_relu(self):\n for device in self.devices:\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n y = torch.randn(4, 4, dtype=torch.float, device=device)\n\n ge = self.checkTrace(self.fn_test_relu, (x, y))\n self.assertAllFused(ge.graph_for(x, y))\n\n def test_erf(self):\n for device in self.devices:\n def fn_test_erf(x):\n return F.relu(torch.erf(x) - torch.erfc(x))\n\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)\n self.assertAllFused(ge.graph_for(x))\n x.requires_grad_(True)\n ge = self.checkScript(fn_test_erf, (x,), profiling=ProfilingMode.PROFILING)\n self.assertAllFused(ge.graph_for(x), except_for=(\"aten::size\", \"prim::BroadcastSizes\",\n \"aten::_size_if_not_equal\"))\n\n @unittest.skipIf(not RUN_CUDA, \"fuser requires CUDA\")\n @unittest.skip(\"rand_like is not supported yet\")\n def test_rand_broadcast_cuda(self):\n def fn_test_rand(x, y):\n r = torch.rand_like(y)\n return r * x + x\n\n # If using profiling, a different function is needed to test different\n # shapes, or we'll use a cached script.\n def fn_test_rand2(x, y):\n r = torch.rand_like(y)\n return r * x * x\n\n x = torch.randn(4, 4, dtype=torch.float, device='cuda')\n y = torch.randn(4, 4, dtype=torch.float, device='cuda')\n script_f = torch.jit.script(fn_test_rand)\n warmup_forward(script_f, x, y)\n out = script_f(x, y)\n self.assertAllFused(script_f.graph_for(x, y))\n x.requires_grad_(True)\n out = script_f(x, y)\n self.assertAllFused(script_f.graph_for(x, y), except_for=(\"aten::size\", \"prim::BroadcastSizes\",\n \"aten::_size_if_not_equal\"))\n\n # test that broadcasting random produces correct results\n x = torch.ones(4, 4, dtype=torch.float, device='cuda')\n y = torch.ones(4, dtype=torch.float, device='cuda')\n script_f = torch.jit.script(fn_test_rand2)\n 
warmup_forward(script_f, x, y)\n out = script_f(x, y)\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(out[0, :] + torch.zeros(4, 4, device='cuda'), out)\n\n @unittest.skipIf(not RUN_CUDA, \"fuser requires CUDA\")\n @unittest.skip(\"rand_like is not supported yet\")\n def test_rand_diamond(self):\n def fn_test_diamond(x, y):\n r = torch.rand_like(y)\n a = x + r\n b = y - r\n return a + b\n\n x = torch.randn(4, 4, dtype=torch.float, device='cuda')\n y = torch.randn(4, 4, dtype=torch.float, device='cuda')\n script_f = torch.jit.script(fn_test_diamond)\n warmup_forward(script_f, x, y)\n out = script_f(x, y)\n self.assertEqual(out, x + y)\n\n def test_scalar(self):\n def fn(x, y):\n return 2 * x + y\n\n x = torch.tensor(0.1, dtype=torch.float, device='cpu')\n y = torch.tensor(1, dtype=torch.float, device='cpu')\n ge = self.checkScript(fn, (x, y))\n self.assertAllFused(ge.graph_for(x, y))\n\n def test_small_constant(self):\n for device in self.devices:\n def fn_test_small_constant(x, y):\n return (1e-8 * x + 5e-9 * y) * 1e8\n x = torch.randn(4, 4, dtype=torch.float, device=device)\n y = torch.randn(4, 4, dtype=torch.float, device=device)\n\n ge = self.checkTrace(fn_test_small_constant, (x, y))\n self.assertAllFused(ge.graph_for(x, y))\n\n # Currently we don't pull constants into fusion groups, because in some\n # cases it could remove the constant from the original graph and now our\n # fusion group needs to return that constant for its other users.\n # Instead of never pulling constants into the fusion group, we should just\n # be more careful at how we rewrite its users.\n # TODO: fix that and reenable the test.\n def test_tensor_scalar_ops(self):\n for device in self.devices:\n def should_fuse(x):\n z = 3.\n y = x + z\n return x * y\n\n def should_fuse_scalar(x, z):\n y = x + int(z)\n return x * y\n\n inputs = [torch.randn(2, 2, dtype=torch.float, device=device)]\n ge = self.checkScript(should_fuse, inputs)\n graph = ge.graph_for(*inputs)\n fusion_groups = self.findFusionGroups(graph)\n self.assertEqual(len(fusion_groups), 1)\n FileCheck().check(\"aten::add\").check(\"aten::mul\").run(str(fusion_groups[0]))\n\n inputs = [\n torch.randn(2, 2, dtype=torch.float, device=device),\n torch.tensor(3., dtype=torch.float, device=device),\n ]\n ge = self.checkScript(should_fuse_scalar, inputs)\n # Check that the fused graph computes correct results when the scalar\n # input changes.\n inputs = [\n torch.randn(2, 2, dtype=torch.float, device=device),\n torch.tensor(7., dtype=torch.float, device=device),\n ]\n self.assertEqual(ge(*inputs), should_fuse_scalar(*inputs))\n # The TE fuser supports fusion of non-constant scalars\n self.assertGraphContainsExactly(\n ge.graph_for(*inputs), FUSION_GROUP, 1, consider_subgraphs=True)\n\n def test_where_and_typing(self):\n for device in self.devices:\n def f(x, y):\n mask = x > y\n res = torch.where(mask, x, y)\n return mask, res\n\n x = torch.randn(4, 4, dtype=torch.double, device=device)\n y = torch.randn(4, 4, dtype=torch.double, device=device)\n\n script_f = self.checkScript(f, (x, y))\n self.assertAllFused(script_f.graph_for(x, y), except_for={'prim::TupleConstruct'})\n\n def test_disabled(self):\n old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()\n torch._C._jit_override_can_fuse_on_cpu(False)\n\n def fn(a):\n return a ** 2 + a\n\n x = torch.randn(4, dtype=torch.float, device=\"cpu\")\n s = self.checkScript(fn, (x,))\n g = s.graph_for(x)\n self.assertEqual(len(self.findFusionGroups(g)), 0)\n\n 
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuser_state)\n\n def data_for(self, dtype, device=\"cuda\", size=None):\n if size is None:\n v = torch.arange(1, 3, dtype=torch.float, device=device)\n else:\n v = torch.rand(*size, device=device)\n if dtype == torch.bool:\n return v > 2\n elif dtype in [torch.qint8, torch.quint8, torch.qint32]:\n return torch.quantize_per_tensor(v, 0.1, 1, dtype=dtype)\n else:\n return v.to(dtype)\n\n def test_torch_to(self):\n # test no op\n @torch.jit.script\n def foo(x):\n return x.to(torch.float)\n\n foo(torch.tensor([3.], dtype=torch.float))\n foo(torch.tensor([3.], dtype=torch.float))\n FileCheck().check_not(\"TensorExpr\").run(torch.jit.last_executed_optimized_graph())\n\n # test not fusing non-const inputs\n @torch.jit.script\n def foo(x, dtype: int):\n return x.to(dtype)\n\n foo(torch.tensor([3.], dtype=torch.float), torch.int)\n foo(torch.tensor([3.], dtype=torch.float), torch.int)\n FileCheck().check_not(\"TensorExpr\").run(torch.jit.last_executed_optimized_graph())\n\n # test not fusing to_pinned inputs\n @torch.jit.script\n def foo(x, dtype: int):\n return x.to(pin_memory=True)\n\n foo(torch.tensor([3.], dtype=torch.float), torch.int)\n foo(torch.tensor([3.], dtype=torch.float), torch.int)\n FileCheck().check_not(\"TensorExpr\").run(torch.jit.last_executed_optimized_graph())\n\n\n # test across-device not supported\n if torch.cuda.is_available():\n @torch.jit.script\n def foo(x):\n return x.to(device=\"cuda\")\n\n foo(torch.tensor([3.], dtype=torch.float))\n foo(torch.tensor([3.], dtype=torch.float))\n FileCheck().check_not(\"TensorExpr\").run(torch.jit.last_executed_optimized_graph())\n\n sizes = [(1, 4), (4, 4)]\n # reuses cast impl, smaller dtype set for faster test\n dtypes = [\n torch.bool,\n torch.int,\n torch.float16,\n torch.float32,\n torch.float64,\n ]\n\n class MyMod(torch.nn.Module):\n def __init__(self, dtype):\n super(MyMod, self).__init__()\n self.dtype = dtype\n\n def forward(self, x):\n return x.to(self.dtype)\n\n bad_dtypes = []\n for dtype, output_dtype, device, size in product(dtypes, dtypes, self.devices, sizes):\n # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n if dtype == output_dtype:\n continue\n\n x = self.data_for(dtype, device, size=size)\n mod = MyMod(output_dtype)\n ref = mod.forward(x)\n # use freezing to make non-Tensor args to `to` constant\n mod = torch.jit.freeze(torch.jit.script(mod.eval()))\n warmup_forward(mod.forward, x)\n self.assertEqual(ref, mod.forward(x))\n self.assertLastGraphAllFused()\n\n @unittest.skip(\"Temporarily disabled\")\n def test_masked_fill(self):\n dtypes = [\n torch.int8,\n torch.int16,\n torch.int32,\n torch.int64,\n # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed\n # torch.float16,\n torch.float32,\n torch.float64,\n torch.bool,\n ]\n sizes = [(2,), (4, 4)]\n for self_dtype, device, scalar_val, size in product(dtypes, self.devices, [0.4, 3], sizes):\n input_v = self.data_for(self_dtype, device, size=size)\n mask = self.data_for(torch.bool, device, size=size)\n\n def fn(input_v, mask):\n return torch.masked_fill(input_v, mask, scalar_val)\n ref = fn(input_v, mask)\n try:\n t = torch.jit.trace(fn, (input_v, mask))\n torch.testing.assert_close(ref, t(input_v, mask))\n print(torch.jit.last_executed_optimized_graph())\n self.assertLastGraphAllFused()\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(self_dtype), 
op.__name__, device, str(size)])\n )\n\n def test_isnan(self):\n x = torch.rand([4])\n x[0] = float('nan')\n inputs = [\n x,\n torch.tensor([float('nan'), .5])\n ]\n dtypes = [\n torch.int8,\n torch.int16,\n torch.int32,\n torch.int64,\n torch.float16,\n torch.float32,\n torch.float64,\n torch.bool,\n ]\n\n for inp, device, dtype in product(inputs, self.devices, dtypes):\n # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n inp = inp.to(device=device, dtype=dtype)\n try:\n f = torch.jit.trace(lambda x: x.isnan(), (inp,))\n warmup_forward(f, inp)\n self.assertEqual(f(inp), inp.isnan())\n self.assertLastGraphAllFused()\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), 'isnan', device])\n )\n\n def test_unary_ops(self):\n def apply(fn):\n return lambda x: fn(x)\n\n unary_ops = [\n torch.lgamma,\n torch.sigmoid,\n torch.reciprocal,\n torch.neg,\n torch.relu,\n F.relu6,\n torch.log,\n torch.log10,\n torch.log1p,\n torch.log2,\n torch.exp,\n torch.expm1,\n torch.erf,\n torch.erfc,\n torch.cos,\n torch.sin,\n torch.tan,\n torch.acos,\n torch.asin,\n torch.cosh,\n torch.sinh,\n torch.atan,\n torch.tanh,\n F.hardtanh,\n F.hardsigmoid,\n F.hardswish,\n F.softplus,\n torch.sqrt,\n torch.rsqrt,\n F.gelu,\n torch.abs,\n torch.ceil,\n torch.floor,\n torch.round,\n torch.trunc,\n torch.frac,\n # TODO: broken on ROCm?\n # F.hardshrink,\n F.leaky_relu,\n lambda x: torch.threshold(x, 0, -10),\n lambda x: torch.clamp(x, -10, 10),\n ]\n gpu_only = {torch.erf, torch.erfc}\n sizes = [(1,), (2,), (4, 4)]\n for dtype, op, device, size in product(self.dtypes, unary_ops, self.devices, sizes):\n # TODO: Add back when https://github.com/pytorch/pytorch/issues/55905 is closed\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n if op in gpu_only and device == \"cpu\":\n continue\n try:\n x = self.data_for(dtype, device, size=size)\n fn = apply(op)\n ref = fn(x)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x,))\n torch.testing.assert_close(ref, t(x))\n self.assertAllFused(t.graph_for(x))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device, str(size)])\n )\n\n def test_binary_ops(self):\n def apply(fn):\n return lambda x, y: fn(x, y)\n\n binary_ops = [\n operator.__and__,\n operator.__or__,\n operator.__xor__,\n torch.add,\n torch.sub,\n torch.mul,\n torch.min,\n torch.max,\n lambda x, y: torch.lerp(x, y, 0.5),\n torch.atan2,\n torch.div,\n torch.eq,\n torch.ne,\n torch.ge,\n torch.gt,\n torch.lt,\n torch.fmod,\n torch.remainder,\n lambda x, y: y.type_as(x),\n ]\n fp_only = [\n torch.fmod,\n torch.remainder,\n ]\n devices = self.devices\n for dtype, op, device in product(self.dtypes, binary_ops, devices):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n x = self.data_for(dtype, device)\n y = self.data_for(dtype, device)\n fn = apply(op)\n ref = fn(x, y)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. 
Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x, y))\n self.assertEqual(ref, t(x, y))\n if op not in fp_only or dtype.is_floating_point:\n self.assertAllFused(t.graph_for(x, y))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n def test_matmul(self):\n def fn(x, y):\n return torch.matmul(x, y)\n\n devices = ['cpu'] # No cuda support for ext calls yet\n sizes = [[[128, 128], [128, 128]],\n [[10, 10], [10, 10]],\n [[1, 16], [16, 128]],\n [[128], [128]],\n [[128], [128, 128]],\n [[3], [3]],\n [[3, 4], [4]],\n [[10, 3, 4], [4]],\n [[10, 3, 4], [10, 4, 5]],\n [[10, 3, 4], [4, 5]],\n ]\n\n # Only 2D x 2D matrix multiply is supported. For non-supported sizes we\n # still want to run results verification to test that we didn't\n # accidentally fuse it, but we skip the 'is-fused' check.\n # TODO: add support for other shape combinations and make this set empty:\n skip_is_fused_check_sizes = [\"[[128], [128]]\",\n \"[[128], [128, 128]]\",\n \"[[3], [3]]\",\n \"[[3, 4], [4]]\",\n \"[[10, 3, 4], [4]]\",\n \"[[10, 3, 4], [10, 4, 5]]\",\n \"[[10, 3, 4], [4, 5]]\",\n ]\n for dtype, size, device in product(self.dtypes, sizes, devices):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n size_x, size_y = size\n x = self.data_for(dtype, device, size=size_x)\n y = self.data_for(dtype, device, size=size_y)\n ref = fn(x, y)\n except Exception as e:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x, y))\n t(x, y)\n self.assertEqual(ref, t(x, y))\n if not str(size) in skip_is_fused_check_sizes:\n self.assertAllFused(t.graph_for(x, y))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), device])\n )\n\n def test_binary_tensor_scalar_ops(self):\n def apply_with_scalar(fn, scalar):\n return lambda x: fn(x, scalar)\n\n # FIXME: Fails in IR Eval: torch.int64 and_ cpu\n binary_ops = [\n operator.__and__,\n operator.__or__,\n operator.__xor__,\n torch.add,\n torch.sub,\n torch.mul,\n torch.eq,\n torch.ne,\n torch.ge,\n torch.lt,\n torch.gt,\n ]\n devices = self.devices\n # Maybe we should split this into separate tests to speed it up by\n # only using scalar values relevant to particular ops\n scalars = [1.5, 3, 0, -2.0, -1]\n for dtype, op, device, scalar in product(self.dtypes, binary_ops, devices, scalars):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n x = self.data_for(dtype, device)\n fn = apply_with_scalar(op, scalar)\n ref = fn(x)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. 
Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x))\n self.assertEqual(ref, t(x))\n self.assertAllFused(t.graph_for(x))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n def test_binary_div_ops(self):\n def apply_with_scalar(fn, scalar):\n return lambda x: fn(x, scalar)\n\n binary_ops = [\n torch.div,\n torch.remainder,\n torch.fmod,\n ]\n devices = self.devices\n # Maybe we should split this into separate tests to speed it up by\n # only using scalar values relevant to particular ops\n scalars = [1.5, 3, -2.0, -1] # skip 0\n for dtype, op, device, scalar in product(self.dtypes, binary_ops, devices, scalars):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n x = self.data_for(dtype, device)\n fn = apply_with_scalar(op, scalar)\n ref = fn(x)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x))\n self.assertEqual(ref, t(x))\n except Exception as e:\n raise RuntimeError(\n \"Failed: {} {} {} {}\".format(dtype, op.__name__, device, scalar)\n )\n\n def test_binary_pow(self):\n def apply_with_scalar(fn, scalar):\n return lambda x: fn(x, scalar)\n\n dtypes = [\n # FIXME: 'pow' fails with dtype=torch.float16/device=cuda/scalar=0\n # torch.float16,\n torch.float32,\n torch.float64,\n # torch.bool intentionally not included\n ]\n binary_ops = [\n torch.pow,\n ]\n # Maybe we should split this into separate tests to speed it up by\n # only using scalar values relevant to particular ops\n scalars = [1.5, 3, 0, -2.0, -1]\n for dtype, op, device, scalar in product(dtypes, binary_ops, self.devices, scalars):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n x = self.data_for(dtype, device)\n fn = apply_with_scalar(op, scalar)\n ref = fn(x)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x))\n self.assertEqual(ref, t(x))\n self.assertAllFused(t.graph_for(x))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n def test_ternary_ops(self):\n def apply(fn):\n return lambda x, y, z: fn(x, y, z)\n\n ternary_ops = [\n torch.lerp,\n torch.addcmul,\n ]\n devices = self.devices\n for dtype, op, device in product(self.dtypes, ternary_ops, devices):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n x = self.data_for(dtype, device)\n y = self.data_for(dtype, device)\n z = self.data_for(dtype, device)\n fn = apply(op)\n ref = fn(x, y, z)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. 
Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x, y, z))\n self.assertEqual(ref, t(x, y, z))\n self.assertAllFused(t.graph_for(x, y, z))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n def test_ternary_norm_ops(self):\n def apply(fn):\n return lambda x, y, z: fn(x, y, z)\n\n ternary_ops = [\n F.batch_norm,\n ]\n devices = self.devices\n for dtype, op, device in product(self.dtypes, ternary_ops, devices):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n x = self.data_for(dtype, device, size=[5, 3, 128, 128])\n y = self.data_for(dtype, device, size=[3])\n z = self.data_for(dtype, device, size=[3])\n fn = apply(op)\n ref = fn(x, y, z)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x, y, z))\n self.assertEqual(ref, t(x, y, z))\n self.assertAllFused(t.graph_for(x, y, z))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n\n @unittest.skip(\"FIXME: fuser doesn't include ListConstruct nodes to the group causing a failure\")\n def test_list_ops(self):\n def apply(fn):\n return lambda x, y, z: fn([x * x, y * y, z * z])\n\n devices = self.devices\n list_ops = [\n torch.cat,\n ]\n for dtype, op, device in product(self.dtypes, list_ops, devices):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n x = self.data_for(dtype, device, size=[5, 4, 1, 7])\n y = self.data_for(dtype, device, size=[5, 4, 1, 7])\n z = self.data_for(dtype, device, size=[5, 4, 1, 7])\n fn = apply(op)\n ref = fn(x, y, z)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (x, y, z))\n self.assertEqual(ref, t(x, y, z))\n self.assertAllFused(t.graph_for(x, y, z))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n def test_where_ops(self):\n def apply(fn):\n return lambda cond, x, y: fn(cond, x, y)\n\n ops = [\n torch.where,\n lambda cond, x, y: torch.where(cond, x, 3.1415),\n lambda cond, x, y: torch.where(cond, 42, y),\n ]\n devices = self.devices\n for dtype, op, device in product(self.dtypes, ops, devices):\n if dtype in [torch.float16, torch.bfloat16] and device == \"cpu\":\n continue\n try:\n cond = self.data_for(torch.bool, device)\n x = self.data_for(dtype, device)\n y = self.data_for(dtype, device)\n fn = apply(op)\n ref = fn(cond, x, y)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. 
Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n try:\n t = torch.jit.trace(fn, (cond, x, y))\n self.assertEqual(ref, t(cond, x, y))\n self.assertAllFused(t.graph_for(cond, x, y))\n except Exception as e:\n raise RuntimeError(\n \" \".join([\"Failed:\", str(dtype), op.__name__, device])\n )\n\n def test_unsupported_dtypes(self):\n for device in self.devices:\n def fn(x):\n return x * x + x\n\n unsupported_dtypes = [\n torch.uint8,\n torch.complex32,\n torch.complex64,\n torch.complex128,\n torch.qint8,\n torch.quint8,\n torch.qint32,\n ]\n for dtype in unsupported_dtypes:\n try:\n x = self.data_for(dtype, device)\n ref = fn(x)\n except Exception:\n # If eager mode doesn't support a dtype/op/device combo,\n # neither does the fuser. Catch everything to avoid needing to\n # guess what errors might be thrown by eager.\n continue\n t = torch.jit.trace(fn, (x,))\n self.assertEqual(ref, t(x))\n self.assertEqual(len(self.findFusionGroups(t.graph_for(x))), 0)\n\n def test_superslomo(self):\n devices = self.devices.copy()\n if not LLVM_ENABLED:\n devices.remove(\"cpu\")\n for device in devices:\n # Test extracted from Super-SloMo: https://github.com/avinashpaliwal/Super-SloMo\n # A few interesting things happen here: strided inputs of mixed size,\n # plus outputs of mixed shapes. The latter characteristic happened to\n # expose a memory corruption bug due to not properly guarding the\n # outputs.\n def eager(t0, t1, t2, t3, t4):\n t5 = torch.mul(t0, t4)\n t6 = torch.mul(t2, t3)\n t7 = torch.mul(t6, t1)\n t9 = torch.add(t5, t7)\n t11 = torch.add(t0, t6)\n ft_p = torch.div(t9, t11)\n return (ft_p, t11, t9, t6)\n\n t0 = torch.rand(1, 6, 352, 352, device=device).transpose(0, 1)\n t1 = torch.rand(6, 3, 352, 352, device=device)\n t2 = torch.rand(6, device=device)[None, None, None, :].permute(3, 0, 1, 2)\n t3 = torch.rand(6, 1, 352, 352, device=device)\n t4 = torch.rand(6, 3, 352, 352, device=device)\n inputs = [t0, t1, t2, t3, t4]\n\n script = torch.jit.script(eager)\n for _ in range(4):\n for pair in zip(script(*inputs), eager(*inputs)):\n test, ref = pair\n torch.testing.assert_close(test, ref)\n self.assertAllFused(script.graph_for(*inputs))\n\n def test_sub_gt_and(self):\n for device in self.devices:\n def eager(t1, t2, t3, t4, t: float):\n w = t1 - t2\n h = t3 - t4\n k = (w > t) & (h > t)\n assert k.dtype == torch.bool\n if t > 0.5:\n # Putting a use of k in a never-executed conditional prevents\n # profiling its type, which leaves it as \"Tensor\". 
If we\n # propagate Tensor back to the definition of k, we have to be\n # careful not to create a fusion group containing it.\n return k + 1\n return w\n t = torch.rand(8, dtype=torch.float, device=device)\n scripted = self.checkScript(eager, (t, t, t, t, 0.1))\n\n def test_chunk_mul_one(self):\n for device in self.devices:\n def eager(x):\n z, y, w = torch.chunk(x, 3, -1)\n return z * 3, y, w\n x = torch.rand(64, 1, 3072, dtype=torch.float, device=device)\n z, y, w = eager(x)\n script = self.checkScript(eager, (x,))\n\n def test_eq_unsqueeze_type_as(self):\n for device in self.devices:\n def eager(a, b):\n mask = b == 1\n mask = torch.unsqueeze(mask, -1)\n x = mask.type_as(a)\n return x, mask\n a = torch.rand(1, 64, 1024, device=device, dtype=torch.float)\n b = torch.randint(-2, 2, (1, 64), device=device, dtype=torch.long)\n script = self.checkScript(eager, (a, b))\n\n def test_neg_pow(self):\n def eager_tt(a: torch.Tensor, b: torch.Tensor):\n return torch.neg(torch.pow(a, b))\n\n def eager_ts(a: torch.Tensor, b: float):\n return torch.neg(torch.pow(a, b))\n\n def eager_st(a: float, b: torch.Tensor):\n return torch.neg(torch.pow(a, b))\n\n a = torch.rand(1, dtype=torch.float)\n b = torch.rand(1, dtype=torch.float)\n s = b.item()\n script = self.checkScript(eager_tt, (a, b))\n self.assertAllFused(script.graph_for(a, b))\n script = self.checkScript(eager_ts, (a, s))\n self.assertAllFused(script.graph_for(a, s))\n script = self.checkScript(eager_st, (s, b))\n self.assertAllFused(script.graph_for(s, b))\n\n @unittest.skipIf(not LLVM_ENABLED, \"Too slow to run with the TE interpreter\")\n def test_conv2d_depthwise(self):\n def eager(input, weight, bias):\n return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=72)\n\n input = torch.rand((1, 72, 56, 56), dtype=torch.float)\n weight = torch.rand((72, 1, 3, 3), dtype=torch.float)\n bias = torch.rand((72), dtype=torch.float)\n\n script = self.checkScript(eager, (input, weight, bias))\n self.assertAllFused(script.graph_for(input, weight, bias))\n\n def test_conv2d(self):\n def eager(input, weight, bias):\n return torch.conv2d(input, weight, bias, stride=1, padding=1, groups=1)\n\n input = torch.rand((1, 64, 56, 56), dtype=torch.float)\n weight = torch.rand((64, 64, 3, 3), dtype=torch.float)\n bias = torch.rand((64), dtype=torch.float)\n\n script = self.checkScript(eager, (input, weight, bias))\n FileCheck().check_not(\"TensorExpr\").run(torch.jit.last_executed_optimized_graph())\n\n def test_type_as_cat(self):\n with inline_fusion_groups():\n def eager(x, y):\n return torch.cat((x, y.type_as(x)), dim=1)\n dtypes = self.dtypes.copy()\n # CPU fuser doesn't support float16.\n dtypes.remove(torch.float16)\n dtypes.remove(torch.bfloat16)\n for dtype1, dtype2 in product(dtypes, dtypes):\n x = torch.randint(2, (1, 13,)).to(dtype1)\n zero = torch.tensor([[0]]).to(dtype2)\n one = torch.tensor([[1]]).to(dtype2)\n script = torch.jit.trace(eager, (x, zero))\n for _ in range(3):\n torch.testing.assert_close(\n script(x, zero),\n eager(x, zero))\n torch.testing.assert_close(\n script(x, one),\n eager(x, one))\n self.assertAllFused(script.graph_for(x, one))\n\n def test_to_device(self):\n def eager(x):\n return x.to(device=\"cpu\").relu()\n x = torch.rand(8)\n script = self.checkScript(eager, (x,))\n self.assertAllFused(script.graph_for(x))\n\n def test_dims(self):\n def eager(x, y):\n return x / (y + 0.0001)\n x = torch.linspace(-1, 1, 768, dtype=torch.float32).as_strided((1, 1, 768), (768, 1, 1))\n y = torch.tensor([[[2.0]]], 
dtype=torch.float32)\n script = self.checkScript(eager, (x, y))\n self.assertAllFused(script.graph_for(x, y))\n\n def test_unsqueeze_var_dim(self):\n def eager(x, y, z: int):\n return x * torch.unsqueeze(y, dim=z)\n x = torch.rand(4, 4, 64).permute(1, 0, 2)\n y = torch.rand(4, 4)\n z = 2\n script = self.checkScript(eager, (x, y, z))\n\n def _test_fwd_bwd(self, fn):\n x = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)\n xs = torch.arange(-10, 10, dtype=torch.float32, requires_grad=True)\n script = torch.jit.script(fn)\n for i in range(11):\n y = fn(x)\n g0 = torch.rand_like(y)\n y.backward(g0)\n\n ys = script(xs)\n ys.backward(g0)\n\n with torch.no_grad():\n x -= 0.1 * x.grad\n xs -= 0.1 * xs.grad\n x.grad = None\n xs.grad = None\n torch.testing.assert_close(y, ys)\n\n def test_relu_fwd_bwd(self):\n def eager(x):\n return torch.relu(x * 1.01)\n self._test_fwd_bwd(eager)\n\n def test_hardswish_fwd_bwd(self):\n def eager(x):\n return F.hardswish(x) * 1.01\n self._test_fwd_bwd(eager)\n\n def test_hardsigmoid_fwd_bwd(self):\n def eager(x):\n return F.hardsigmoid(x) * 1.01\n self._test_fwd_bwd(eager)\n\n def test_dynamic_cat(self):\n with inline_fusion_groups():\n @torch.jit.script\n def repro(xs: List[torch.Tensor], ys: List[torch.Tensor], zs: List[torch.Tensor]):\n return [\n torch.cat([x, torch.cat([y, z], dim=-1)], dim=-1)\n for x, y, z in zip(xs, ys, zs)\n ]\n for _ in range(3):\n N = 3\n xs = [torch.ones(21) for _ in range(N)]\n # Note: concat of ys and zs will have the same size for each\n # pair, even though the individual ys and zs do not.\n ys = [torch.ones(N - i) for i in range(N)]\n zs = [torch.ones(i) for i in range(N)]\n repro(xs, ys, zs)\n\n def test_scalar_only_inputs(self):\n def eager(b: float):\n a = torch.ones(1)\n return a * b\n\n script = self.checkScript(eager, (1.0,))\n\n def test_cat_2k_args(self):\n with inline_fusion_groups():\n def eager(x):\n return torch.relu(torch.cat([x for _ in range(2000)]))\n x = torch.randn(1)\n trace = self.checkTrace(eager, (x,))\n fusion_groups = self.findFusionGroups(trace.graph_for(x))\n self.assertEqual(len(fusion_groups), 0)\n\n def test_adaptive_avg_pool2d(self):\n # TODO: once the adaptive_avg_pool2d is available in OpInfo DB, this\n # test should be moved there\n with inline_fusion_groups():\n def foo1(x):\n return torch.nn.functional.adaptive_avg_pool2d(x, (2, 2))\n\n def foo2(x):\n return torch.nn.functional.adaptive_avg_pool2d(x, (2))\n\n x = torch.randn(4, 4, 4)\n for foo in [foo1, foo2]:\n f = torch.jit.trace(foo, (x,))\n kernel = torch._C._te.TensorExprKernel(f.graph)\n correct_val = f(x)\n self.assertEqual(kernel.run((x,)), correct_val)\n\n def test_unrolled_cat(self):\n with inline_fusion_groups():\n def eager(x):\n ret = torch.empty(0)\n for i in range(x.shape[0]):\n ret = torch.cat([ret, x[i].relu()])\n return ret\n script = torch.jit.script(eager)\n\n # Warm up with size=1 tensor; since the loop iterates once the\n # profile data will be \"burned in\" assuming size=1, and then\n # unrolled.\n x = torch.ones(1, 1)\n for _ in range(3):\n script(x)\n\n torch.testing.assert_close(eager(x), script(x))\n\n # Now when an input hits the unrolled path, it will produce an\n # incorrectly-sized tensor, since size=1 has been burned in.\n x = torch.ones((8, 1))\n torch.testing.assert_close(eager(x), script(x))\n\n def test_batch_norm(self):\n def test(fn, args):\n trace = torch.jit.trace(fn, args)\n self.assertAllFused(trace.graph_for(*args))\n torch.testing.assert_allclose(fn(*args), trace(*args))\n\n def bn(i, x):\n 
return torch.batch_norm(i, x, x, x, x, False, 0.1, 1e-4, False).relu()\n\n def bn_no_weight(i, x):\n return torch.batch_norm(i, None, x, x, x, False, 0.1, 1e-4, False).relu()\n\n def bn_no_bias(i, x):\n return torch.batch_norm(i, x, None, x, x, False, 0.1, 1e-4, False).relu()\n\n def bn_neither(i, x):\n return torch.batch_norm(i, None, None, x, x, False, 0.1, 1e-4, False).relu()\n\n for device in self.devices:\n i = torch.randn(4, 16, 32, 40, device=device)\n x = torch.randn(16, device=device)\n for fn in [bn, bn_no_weight, bn_no_bias, bn_neither]:\n test(fn, (i, x))\n\n def test_profiler(self):\n @torch.jit.script\n def test(x, y, z):\n return x * y + z\n\n args = [torch.randn(4) for _ in range(3)]\n with torch.autograd.profiler.profile() as prof:\n for _ in range(3):\n test(*args)\n self.assertIn(\"fused_mul_add\", prof.table())\n\n\nworks_list = [\n '__radd__',\n '__rdiv__',\n '__rmul__',\n '__rmod__',\n 'abs',\n 'acos',\n 'add',\n 'addcmul',\n 'addmm.decomposed',\n 'asin',\n 'atan',\n 'atan2',\n 'ceil',\n 'clamp',\n 'clamp.scalar',\n 'cos',\n 'cosh',\n 'div.no_rounding_mode',\n 'div.true_rounding',\n 'div.floor_rounding',\n 'div.trunc_rounding',\n 'eq',\n 'erf',\n 'erfc',\n 'exp',\n 'expand',\n 'expand_as',\n 'expm1',\n 'floor',\n 'fmod',\n 'fmod.autodiffed',\n 'ge',\n 'gt',\n 'isnan',\n 'le',\n 'lerp',\n 'lgamma',\n 'log',\n 'log10',\n 'log1p',\n 'log2',\n 'lt',\n 'masked_fill',\n 'max.binary',\n 'mean',\n 'min.binary',\n 'mm',\n 'mul',\n 'ne',\n 'neg',\n 'nn.functional.gelu',\n 'nn.functional.hardshrink',\n 'nn.functional.hardsigmoid',\n 'nn.functional.hardswish',\n 'nn.functional.softplus',\n 'nn.functional.hardtanh',\n 'nn.functional.leaky_relu',\n 'nn.functional.relu',\n 'nn.functional.relu6',\n 'permute',\n 'pow',\n 'reciprocal',\n 'remainder',\n 'remainder.autodiffed',\n 'reshape',\n 'reshape_as',\n 'round',\n 'rsub',\n 'rsub.rsub_tensor',\n 'rsqrt',\n 'sigmoid',\n 'sign',\n 'sin',\n 'sinh',\n 'sqrt',\n 'sub',\n 'sum',\n 't',\n 'tan',\n 'tanh',\n 'transpose',\n 'true_divide',\n 'trunc',\n 'unsqueeze',\n 'view',\n 'view_as',\n 'where',\n 'bool',\n 'byte',\n 'char',\n 'double',\n 'float',\n 'half',\n 'int',\n 'long',\n 'short',\n]\n\n# Note: a missing comma here previously concatenated '__rmatmul__' and 'frac'\n# into one bogus entry, so neither op was actually tested as a known failure.\nknown_failures = [\n '__rmatmul__',\n 'frac',\n 'matmul',\n]\n\n# If your OpInfo test causes this test to fail, add it here\nskip_ops = [\n 'conj'\n]\n\ndef get_name(op):\n l = [op.name]\n if op.variant_test_name != '':\n l.append(op.variant_test_name)\n return '.'.join(l)\n\nclass TestNNCOpInfo(TestCase):\n def te_compile(self, device, dtype, op):\n if op.name in skip_ops:\n return\n sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)\n for sample_input in sample_inputs_itr:\n arg_values = [sample_input.input] + list(sample_input.args)\n kwarg_values = sample_input.kwargs\n param_names = []\n param_values = []\n fx_args = []\n for idx, v in enumerate(arg_values):\n if isinstance(v, torch.Tensor):\n param_names.append(f\"arg_{idx}\")\n param_values.append(v)\n fx_args.append(param_names[-1])\n else:\n fx_args.append(f'{repr(v)}')\n\n for k, v in kwarg_values.items():\n if isinstance(v, torch.Tensor):\n param_names.append(k)\n param_values.append(v)\n fx_args.append(f'{k} = {k}')\n else:\n fx_args.append(f'{k} = {repr(v)}')\n\n code = f\"\"\"\ndef f({', '.join(param_names)}):\n return op.op({', '.join(fx_args)})\"\"\"\n g = {'torch': torch, 'inf' : math.inf, 'op': op}\n exec(code, g)\n f = g['f']\n f.__module__ = 'test'\n out = f(*param_values)\n\n ts_g = torch.jit.trace(f, param_values)\n kernel = 
torch._C._te.TensorExprKernel(ts_g.graph)\n correct_val = f(*param_values)\n self.assertEqual(kernel.run(tuple(param_values)), correct_val)\n self.assertEqual(kernel.fallback(tuple(param_values)), correct_val)\n\n @onlyCPU\n @unittest.skipIf(not LLVM_ENABLED, \"Compiles with TensorExprKernel\")\n @ops([op for op in op_db if get_name(op) in works_list], allowed_dtypes=(torch.float,))\n def test_working(self, device, dtype, op):\n self.te_compile(device, dtype, op)\n\n @onlyCPU\n @unittest.skipIf(not LLVM_ENABLED, \"Compiles with TensorExprKernel\")\n @ops([op for op in op_db if get_name(op) in known_failures], allowed_dtypes=(torch.float,))\n def test_failures(self, device, dtype, op):\n try:\n self.te_compile(device, dtype, op)\n except Exception as e:\n pass\n else:\n raise RuntimeError(\"Expected test to fail. If it now works, move op into works_list\")\n\n @onlyCPU\n @unittest.skipIf(not LLVM_ENABLED, \"Compiles with TensorExprKernel\")\n @ops([op for op in op_db if get_name(op) not in works_list + known_failures], allowed_dtypes=(torch.float,))\n def test_unsupported(self, device, dtype, op):\n if get_name(op) in skip_ops:\n return\n try:\n self.te_compile(device, dtype, op)\n except Exception as e:\n pass\n else:\n raise RuntimeError(\"Expected test to fail. If it now works, move op into works_list\")\n\n\nonly_for = (\"cpu\", \"cuda\")\ninstantiate_device_type_tests(TestNNCOpInfo, globals(), only_for=only_for)\n\nif __name__ == '__main__':\n run_tests()\n"
] |
[
[
"torch.all",
"torch.randint",
"torch.max",
"torch.rand_like",
"torch._C._debug_set_fusion_group_inlining",
"torch.zeros",
"torch._C._jit_set_profiling_mode",
"torch._C._llvm_enabled",
"torch._C._jit_set_texpr_reductions_enabled",
"torch.cat",
"torch.masked_fill",
"torch.erfc",
"torch.jit.last_executed_optimized_graph",
"torch._C._jit_override_can_fuse_on_gpu",
"torch.tanh",
"torch.testing.FileCheck",
"torch._C._jit_debug_fuser_num_cached_kernel_specs",
"torch.no_grad",
"torch.cuda.is_available",
"torch.where",
"torch.device",
"torch._C._jit_set_te_must_use_llvm_cpu",
"torch.pow",
"torch.jit.script",
"torch.ones",
"torch.jit.trace",
"torch.add",
"torch.randn",
"torch._C._jit_texpr_fuser_enabled",
"torch._C._jit_set_texpr_fuser_enabled",
"torch.tensor",
"torch.relu",
"torch.nn.functional.relu",
"torch._C._jit_override_can_fuse_on_cpu",
"torch.rand",
"torch.quantize_per_tensor",
"torch.arange",
"torch.mul",
"torch.nn.functional.hardswish",
"torch.jit.CompilationUnit",
"torch.ones_like",
"torch.testing.assert_close",
"torch.batch_norm",
"torch._C._te.TensorExprKernel",
"torch.div",
"torch.lerp",
"torch.linspace",
"torch._C._jit_can_fuse_on_cpu",
"torch.empty",
"torch.nn.functional.hardsigmoid",
"torch.min",
"torch.testing._internal.common_utils.enable_profiling_mode_for_profiling_tests",
"torch.unsqueeze",
"torch.nn.functional.adaptive_avg_pool2d",
"torch._C._jit_can_fuse_on_gpu",
"torch.threshold",
"torch.testing._internal.common_utils.run_tests",
"torch._C._debug_get_fusion_group_inlining",
"torch.conv2d",
"torch._C._jit_set_profiling_executor",
"torch.testing._internal.jit_utils.set_fusion_group_inlining",
"torch.matmul",
"torch.erf",
"torch._C._jit_get_te_must_use_llvm_cpu",
"torch.chunk",
"torch.clamp",
"torch.autograd.profiler.profile"
]
] |
ILABUTK/handson-ml
|
[
"dd42ed2cae1d3c1e44dffba3a324c21154f13c01"
] |
[
"xp_models/XP_RL/AnyLogic-Python/python_v2/RL_SIM.py"
] |
[
"\"\"\"\nReinforcement learning.\n\"\"\"\n\n# import\nimport re\nimport json\nimport logging\nimport subprocess\nimport numpy as np\n\n\nclass RL_Sim_Env:\n \"\"\"\n Reinforcement learning class, using ANYLOGIC to simulate;\n - java_cmd: java command to run the model;\n - no transition function;\n \"\"\"\n\n def __init__(\n self, name, states, actions, java_cmd, discount_factor\n ):\n self.name = name\n self.states = states\n self.actions = actions\n self.java_cmd = java_cmd\n self.discount_factor = discount_factor\n\n def __prepare_q(self, q):\n \"\"\"\n prepare q values to transfer to java.\n \"\"\"\n q_str = {}\n for key in q.keys():\n if key[0] == 'Delta':\n s = 'Delta-{}'.format(\n key[1]\n )\n else:\n s = '{}-{}-{}-{}-{}'.format(\n key[0][0][0], key[0][0][1], key[0][1][0],\n key[0][1][1], key[1]\n )\n q_str[s] = q[key]\n byte_q = json.dumps(q_str).encode('utf-8')\n return byte_q\n\n def __execute_java(self, java_cmd, q):\n \"\"\"\n run java file.\n \"\"\"\n proc = subprocess.Popen(\n java_cmd, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n stdout, stderr = proc.communicate(q)\n return stdout.decode(\"utf-8\")\n\n def __extract_sim_output(self, output):\n \"\"\"\n extract states, actions and returns\n \"\"\"\n # get data\n matched = re.findall(r\".*?\\[(.*)].*\", output)\n # states\n states_text = re.findall(r'[+-]?\\d+(?:\\.\\d+)?', matched[0])\n states = []\n ind = 0\n s = [[0, 0], [0, 0]]\n for i in states_text:\n if ind > 1:\n s[1][ind - 2] = int(i)\n else:\n s[0][ind] = int(i)\n if (ind + 1) % 4 == 0:\n # check if it's Delta\n if s[1][0] < 0 or s[1][0] > 7 or\\\n s[1][1] < 0 or s[1][1] > 7:\n states.append('Delta')\n else:\n states.append(\n (tuple(s[0]), tuple(s[1]))\n )\n s = [[0, 0], [0, 0]]\n ind = 0\n continue\n ind += 1\n # actions\n actions = []\n for i in matched[1]:\n if i != ' ' and i != ',':\n actions.append(i)\n # returns\n returns_text = re.findall(r'[+-]?\\d+(?:\\.\\d+)?', matched[2])\n returns = [\n float(i)\n for i in returns_text\n ]\n return states, actions, returns\n\n def __Q_update(self, q, b_policy, epsilon, alpha):\n \"\"\"one episode of Q-learning\"\"\"\n # make q values byte-like objects\n byte_q = self.__prepare_q(q)\n # run a simulation\n output = self.__execute_java(self.java_cmd, byte_q)\n # inteprete results\n sim_states, sim_actions, sim_returns = self.__extract_sim_output(\n output\n )\n # averaged return\n G = 0\n # start iteration to update q value\n for iter in range(len(sim_returns)):\n # state\n state = sim_states[iter]\n # termination condition\n if state == 'Delta':\n break\n # take action\n action = sim_actions[iter]\n # get reward\n R = sim_returns[iter]\n # observe reward and new_state\n new_state = sim_states[iter + 1]\n # update q\n G = G + self.discount_factor * R\n q[state, action] = q[state, action] + alpha * (\n R + self.discount_factor * np.max([\n q[new_state, a]\n for a in self.actions\n ]) - q[state, action]\n )\n return q, G\n\n def Q_Leaning(self, episodes, b_policy, epsilon, alpha):\n \"\"\"\n Q_learning.\n - b_policy: behavior policy. 
'e-greedy' or 'random';\n - terminal state is denoted by 'Delta';\n - alpha: step size;\n - function return: 'policy', 'q', 'G'.\n \"\"\"\n # initialization\n q = {}\n for s in self.states:\n for a in self.actions:\n q[s, a] = 0\n G = {}\n # loop for episodes\n for iter in range(episodes):\n logging.info(\"Iteration {}\".format(iter))\n q, G[iter] = self.__Q_update(q, b_policy, epsilon, alpha)\n logging.info(\"G {}\".format(G[iter]))\n\n # find the learned policy\n policy = {}\n for s in self.states:\n policy[s] = self.actions[np.argmax([\n q[s, a]\n for a in self.actions\n ])]\n\n return policy, q, G\n\n def simulation(self, q):\n \"\"\"\n simulation\n \"\"\"\n byte_q = self.__prepare_q(q)\n output = self.__execute_java(self.java_cmd, byte_q)\n return output\n"
] |
[
[
"numpy.max",
"numpy.argmax"
]
] |
UdoGi/dark-matter
|
[
"3d49e89fa5e81f83144119f6216c5774176d203b"
] |
[
"dark/mutations.py"
] |
[
"import os\nfrom collections import defaultdict\nimport numpy as np\n\ntry:\n import matplotlib\n if not os.environ.get('DISPLAY'):\n # Use non-interactive Agg backend\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\nexcept ImportError:\n import platform\n if platform.python_implementation() == 'PyPy':\n # PyPy doesn't have a version of matplotlib. Make a fake\n # class that raises if it is used. This allows us to use other\n # 'dark' code that happens to import dark.mutations but not use the\n # functions that rely on matplotlib.\n class plt(object):\n def __getattr__(self, _):\n raise NotImplementedError(\n 'matplotlib is not supported under pypy')\n else:\n raise\n\nfrom random import choice, uniform\n\nfrom dark import ncbidb\n\n\ndef basePlotter(blastHits, title):\n \"\"\"\n Plot the reads and the subject, so that bases in the reads which are\n different from the subject are shown. Else a '.' is shown.\n like so:\n subject_gi ATGCGTACGTACGACACC\n read_1 A......TTC..T\n\n @param blastHits: A L{dark.blast.BlastHits} instance.\n @param title: A C{str} sequence title that was matched by BLAST. We plot\n the reads that matched this title.\n \"\"\"\n result = []\n params = blastHits.plotParams\n assert params is not None, ('Oops, it looks like you forgot to run '\n 'computePlotInfo.')\n\n sequence = ncbidb.getSequence(title, blastHits.records.blastDb)\n subject = sequence.seq\n gi = title.split('|')[1]\n sub = '%s\\t \\t \\t%s' % (gi, subject)\n result.append(sub)\n\n plotInfo = blastHits.titles[title]['plotInfo']\n assert plotInfo is not None, ('Oops, it looks like you forgot to run '\n 'computePlotInfo.')\n\n items = plotInfo['items']\n count = 0\n for item in items:\n count += 1\n hsp = item['hsp']\n queryTitle = blastHits.fasta[item['readNum']].id\n # If the product of the subject and query frame values is +ve,\n # then they're either both +ve or both -ve, so we just use the\n # query as is. Otherwise, we need to reverse complement it.\n if item['frame']['subject'] * item['frame']['query'] > 0:\n query = blastHits.fasta[item['readNum']].seq\n reverse = False\n else:\n # One of the subject or query has negative sense.\n query = blastHits.fasta[\n item['readNum']].reverse_complement().seq\n reverse = True\n query = query.upper()\n queryStart = hsp['queryStart']\n subjectStart = hsp['subjectStart']\n queryEnd = hsp['queryEnd']\n subjectEnd = hsp['subjectEnd']\n\n # Before comparing the read to the subject, make a string of the\n # same length as the subject, which contains the read and\n # has ' ' where the read does not match.\n # 3 parts need to be taken into account:\n # 1) the left offset (if the query doesn't stick out to the left)\n # 2) the query. 
if the frame is -1, it has to be reversed.\n # The query consists of 3 parts: left, middle (control for gaps)\n # 3) the right offset\n\n # Do part 1) and 2).\n if queryStart < 0:\n # The query is sticking out to the left.\n leftQuery = ''\n if subjectStart == 0:\n # The match starts at the first base of the subject.\n middleLeftQuery = ''\n else:\n # The match starts into the subject.\n # Determine the length of the not matching query\n # part to the left.\n leftOffset = -1 * queryStart\n rightOffset = subjectStart + leftOffset\n middleLeftQuery = query[leftOffset:rightOffset]\n else:\n # The query is not sticking out to the left\n # make the left offset.\n leftQuery = queryStart * ' '\n\n leftQueryOffset = subjectStart - queryStart\n middleLeftQuery = query[:leftQueryOffset]\n\n # Do part 3).\n # Disregard gaps in subject while adding.\n matchQuery = item['origHsp'].query\n matchSubject = item['origHsp'].sbjct\n index = 0\n mid = ''\n for item in range(len(matchQuery)):\n if matchSubject[index] != ' ':\n mid += matchQuery[index]\n index += 1\n # if the query has been reversed, turn the matched part around\n if reverse:\n rev = ''\n toReverse = mid\n reverseDict = {' ': ' ', '-': '-', 'A': 'T', 'T': 'A',\n 'C': 'G', 'G': 'C', '.': '.', 'N': 'N'}\n for item in toReverse:\n newItem = reverseDict[item]\n rev += newItem\n mid = rev[::-1]\n\n middleQuery = middleLeftQuery + mid\n\n # add right not-matching part of the query\n rightQueryOffset = queryEnd - subjectEnd\n rightQuery = query[-rightQueryOffset:]\n middleQuery += rightQuery\n\n read = leftQuery + middleQuery\n\n # do part 3)\n offset = len(subject) - len(read)\n # if the read is sticking out to the right\n # chop it off\n if offset < 0:\n read = read[:offset]\n # if it's not sticking out, fill the space with ' '\n elif offset > 0:\n read += offset * ' '\n\n # compare the subject and the read, make a string\n # called 'comparison', which contains a '.' 
if the bases\n # are equal and the letter of the read if they are not.\n comparison = ''\n for readBase, subjectBase in zip(read, subject):\n if readBase == ' ':\n comparison += ' '\n elif readBase == subjectBase:\n comparison += '.'\n elif readBase != subjectBase:\n comparison += readBase\n index += 1\n que = '%s \\t %s' % (queryTitle, comparison)\n result.append(que)\n\n # sanity checks\n assert (len(comparison) == len(subject)), (\n '%d != %d' % (len(comparison), len(subject)))\n\n index = 0\n if comparison[index] == ' ':\n index += 1\n else:\n start = index - 1\n assert (start == queryStart or start == -1), (\n '%s != %s or %s != -1' % (start, queryStart, start))\n\n return result\n\n\ndef getAPOBECFrequencies(dotAlignment, orig, new, pattern):\n \"\"\"\n Gets mutation frequencies if they are in a certain pattern.\n\n @param dotAlignment: result from calling basePlotter\n @param orig: A C{str}, naming the original base\n @param new: A C{str}, what orig was mutated to\n @param pattern: A C{str}, which pattern we're looking for\n (must be one of 'cPattern', 'tPattern')\n \"\"\"\n cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',\n 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']\n tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',\n 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']\n # choose the right pattern\n if pattern == 'cPattern':\n patterns = cPattern\n middleBase = 'C'\n else:\n patterns = tPattern\n middleBase = 'T'\n # generate the freqs dict with the right pattern\n freqs = defaultdict(int)\n for pattern in patterns:\n freqs[pattern] = 0\n # get the subject sequence from dotAlignment\n subject = dotAlignment[0].split('\\t')[3]\n # exclude the subject from the dotAlignment, so just the queries\n # are left over\n queries = dotAlignment[1:]\n for item in queries:\n query = item.split('\\t')[1]\n index = 0\n for queryBase in query:\n qBase = query[index]\n sBase = subject[index]\n if qBase == new and sBase == orig:\n # Compute minusSb before the try block: subject[index - 1]\n # never raises (negative indexing wraps), and setting it\n # inside the try left it undefined when subject[index + 1]\n # raised IndexError at the last base.\n minusSb = subject[index - 1]\n try:\n plusSb = subject[index + 1]\n except IndexError:\n plusSb = 'end'\n motif = '%s%s%s' % (minusSb, middleBase, plusSb)\n if motif in freqs:\n freqs[motif] += 1\n index += 1\n\n return freqs\n\n\ndef getCompleteFreqs(blastHits):\n \"\"\"\n Make a dictionary which collects all mutation frequencies from\n all reads.\n Calls basePlotter to get dotAlignment, which is passed to\n getAPOBECFrequencies with the respective parameter, to collect\n the frequencies.\n\n @param blastHits: A L{dark.blast.BlastHits} instance.\n \"\"\"\n allFreqs = {}\n for title in blastHits.titles:\n allFreqs[title] = {\n 'C>A': {},\n 'C>G': {},\n 'C>T': {},\n 'T>A': {},\n 'T>C': {},\n 'T>G': {},\n }\n basesPlotted = basePlotter(blastHits, title)\n for mutation in allFreqs[title]:\n orig = mutation[0]\n new = mutation[2]\n if orig == 'C':\n pattern = 'cPattern'\n else:\n pattern = 'tPattern'\n freqs = getAPOBECFrequencies(basesPlotted, orig, new, pattern)\n allFreqs[title][mutation] = freqs\n numberOfReads = len(blastHits.titles[title]['plotInfo']['items'])\n allFreqs[title]['numberOfReads'] = numberOfReads\n allFreqs[title]['bitScoreMax'] = blastHits.titles[\n title]['plotInfo']['bitScoreMax']\n return allFreqs\n\n\ndef makeFrequencyGraph(allFreqs, title, substitution, pattern,\n color='blue', createFigure=True, showFigure=True,\n readsAx=False):\n \"\"\"\n For a title, make a graph showing the frequencies.\n\n @param allFreqs: result from getCompleteFreqs\n @param title: A C{str}, title of virus of which 
frequencies should be\n plotted.\n @param substitution: A C{str}, which substitution should be plotted;\n must be one of 'C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G'.\n @param pattern: A C{str}, which pattern we're looking for ( must be\n one of 'cPattern', 'tPattern')\n @param color: A C{str}, color of bars.\n @param createFigure: If C{True}, create a figure.\n @param showFigure: If C{True}, show the created figure.\n @param readsAx: If not None, use this as the subplot for displaying reads.\n \"\"\"\n cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',\n 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']\n tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',\n 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']\n\n # choose the right pattern\n if pattern == 'cPattern':\n patterns = cPattern\n else:\n patterns = tPattern\n\n fig = plt.figure(figsize=(10, 10))\n ax = readsAx or fig.add_subplot(111)\n # how many bars\n N = 16\n ind = np.arange(N)\n width = 0.4\n # make a list in the right order, so that it can be plotted easily\n divisor = allFreqs[title]['numberOfReads']\n toPlot = allFreqs[title][substitution]\n index = 0\n data = []\n for item in patterns:\n newData = toPlot[patterns[index]] / divisor\n data.append(newData)\n index += 1\n # create the bars\n ax.bar(ind, data, width, color=color)\n maxY = np.max(data) + 5\n # axes and labels\n if createFigure:\n title = title.split('|')[4][:50]\n ax.set_title('%s \\n %s' % (title, substitution), fontsize=20)\n ax.set_ylim(0, maxY)\n ax.set_ylabel('Absolute Number of Mutations', fontsize=16)\n ax.set_xticks(ind + width)\n ax.set_xticklabels(patterns, rotation=45, fontsize=8)\n if createFigure is False:\n ax.set_xticks(ind + width)\n ax.set_xticklabels(patterns, rotation=45, fontsize=0)\n else:\n if showFigure:\n plt.show()\n return maxY\n\n\ndef makeFrequencyPanel(allFreqs, patientName):\n \"\"\"\n For a title, make a graph showing the frequencies.\n\n @param allFreqs: result from getCompleteFreqs\n @param patientName: A C{str}, title for the panel\n \"\"\"\n titles = sorted(\n iter(allFreqs.keys()),\n key=lambda title: (allFreqs[title]['bitScoreMax'], title))\n\n origMaxY = 0\n cols = 6\n rows = len(allFreqs)\n figure, ax = plt.subplots(rows, cols, squeeze=False)\n substitutions = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G']\n colors = ['blue', 'black', 'red', 'yellow', 'green', 'orange']\n\n for i, title in enumerate(titles):\n for index in range(6):\n for subst in allFreqs[str(title)]:\n substitution = substitutions[index]\n print(i, index, title, 'substitution', substitutions[index])\n if substitution[0] == 'C':\n pattern = 'cPattern'\n else:\n pattern = 'tPattern'\n maxY = makeFrequencyGraph(allFreqs, title, substitution,\n pattern, color=colors[index],\n createFigure=False, showFigure=False,\n readsAx=ax[i][index])\n if maxY > origMaxY:\n origMaxY = maxY\n\n # add title for individual plot.\n # if used for other viruses, this will have to be adapted.\n if index == 0:\n gi = title.split('|')[1]\n titles = title.split(' ')\n try:\n typeIndex = titles.index('type')\n except ValueError:\n typeNumber = 'gi: %s' % gi\n else:\n typeNumber = titles[typeIndex + 1]\n\n ax[i][index].set_ylabel(('Type %s \\n maxBitScore: %s' % (\n typeNumber, allFreqs[title]['bitScoreMax'])), fontsize=10)\n # add xAxis tick labels\n if i == 0:\n ax[i][index].set_title(substitution, fontsize=13)\n if i == len(allFreqs) - 1 or i == (len(allFreqs) - 1) / 2:\n if index < 3:\n pat = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG',\n 
'CCT', 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC',\n 'TCG', 'TCT']\n else:\n pat = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG',\n 'CTT', 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC',\n 'TTG', 'TTT']\n ax[i][index].set_xticklabels(pat, rotation=45, fontsize=8)\n\n # make Y-axis equal\n for i, title in enumerate(allFreqs):\n for index in range(6):\n a = ax[i][index]\n a.set_ylim([0, origMaxY])\n # add title of whole panel\n figure.suptitle('Mutation Signatures in %s' % patientName, fontsize=20)\n figure.set_size_inches(5 * cols, 3 * rows, forward=True)\n figure.show()\n\n return allFreqs\n\n\ndef mutateString(original, n, replacements='acgt'):\n \"\"\"\n Mutate C{original} in C{n} places with chars chosen from C{replacements}.\n\n @param original: The original C{str} to mutate.\n @param n: The C{int} number of locations to mutate.\n @param replacements: The C{str} of replacement letters.\n\n @return: A new C{str} with C{n} places of C{original} mutated.\n @raises ValueError: if C{n} is too high, or C{replacement} contains\n duplicates, or if no replacement can be made at a certain locus\n because C{replacements} is of length one, or if C{original} is of\n zero length.\n \"\"\"\n if not original:\n raise ValueError('Empty original string passed.')\n\n if n > len(original):\n raise ValueError('Cannot make %d mutations in a string of length %d' %\n (n, len(original)))\n\n if len(replacements) != len(set(replacements)):\n raise ValueError('Replacement string contains duplicates')\n\n if len(replacements) == 1 and original.find(replacements) != -1:\n raise ValueError('Impossible replacement')\n\n result = list(original)\n length = len(original)\n\n for offset in range(length):\n if uniform(0.0, 1.0) < float(n) / (length - offset):\n # Mutate.\n while True:\n new = choice(replacements)\n if new != result[offset]:\n result[offset] = new\n break\n n -= 1\n if n == 0:\n break\n\n return ''.join(result)\n"
] |
[
[
"numpy.arange",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.max",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
space-physics/transcar
|
[
"a9305bd29723beb45004a8882627fa518d8a1bb6"
] |
[
"tests/test_all.py"
] |
[
"#!/usr/bin/env python\nimport pandas\nfrom pathlib import Path\nimport pytest\nfrom pytest import approx\nimport transcar.base as transcar\nimport transcarread as tr\n\nroot = Path(__file__).parents[1]\nbeam = \"beam947.2\"\nrefdir = root / \"tests\" / beam\nkinfn = \"dir.output/emissions.dat\"\n\n\ndef test_run_transcar(tmp_path):\n\n odir = tmp_path\n\n params = {\"rodir\": odir, \"Q0\": 70114000000.0, \"msgfn\": \"transcar.log\", \"errfn\": \"transcarError.log\"}\n\n beams = pandas.read_csv(root / \"tests/test_E1E2prev.csv\", header=None, names=[\"E1\", \"E2\", \"pr1\", \"pr2\"]).squeeze()\n\n transcar.mono_beam_arbiter(beams, params)\n\n refexc = tr.ExcitationRates(refdir / kinfn)\n\n exc = tr.ExcitationRates(odir / beam / kinfn)\n\n ind = [[1, 12, 5], [0, 62, 8]]\n\n for i in ind:\n assert refexc[i[0], i[1], i[2]].values == approx(exc[i[0], i[1], i[2]].values, rel=1e-3)\n\n assert refexc.time.shape == refexc.time.shape, \"did you rerun the test without clearing the output directory first?\"\n assert (refexc.time == exc.time).all(), \"simulation time of current run did not match reference run\"\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-x\", __file__])\n"
] |
[
[
"pandas.read_csv"
]
] |
lokax/duckdb
|
[
"c2581dfebccaebae9468c924c2c722fcf0306944"
] |
[
"tools/pythonpkg/tests/fast/arrow/test_filter_pushdown.py"
] |
[
"import duckdb\nimport os\nimport pytest\nimport tempfile\ntry:\n import pyarrow as pa\n import pyarrow.parquet as pq\n import pyarrow.dataset as ds\n import numpy as np\n import pandas as pd\n import re\n can_run = True\nexcept:\n can_run = False\n\n## DuckDB connection used in this test\nduckdb_conn = duckdb.connect()\n\ndef numeric_operators(data_type, tbl_name):\n duckdb_conn.execute(\"CREATE TABLE \" +tbl_name+ \" (a \"+data_type+\", b \"+data_type+\", c \"+data_type+\")\")\n duckdb_conn.execute(\"INSERT INTO \" +tbl_name+ \" VALUES (1,1,1),(10,10,10),(100,10,100),(NULL,NULL,NULL)\")\n duck_tbl = duckdb_conn.table(tbl_name)\n arrow_table = duck_tbl.arrow()\n print (arrow_table)\n\n duckdb_conn.register(\"testarrow\",arrow_table)\n # Try ==\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a =1\").fetchone()[0] == 1\n # Try >\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >1\").fetchone()[0] == 2\n # Try >=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >=10\").fetchone()[0] == 2\n # Try <\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <10\").fetchone()[0] == 1\n # Try <=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <=10\").fetchone()[0] == 2\n\n # Try Is Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NULL\").fetchone()[0] == 1\n # Try Is Not Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NOT NULL\").fetchone()[0] == 3\n\n # Try And\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a=10 and b =1\").fetchone()[0] == 0\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a =100 and b = 10 and c = 100\").fetchone()[0] == 1\n\n # Try Or\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a = 100 or b =1\").fetchone()[0] == 2\n\n duckdb_conn.execute(\"EXPLAIN SELECT count(*) from testarrow where a = 100 or b =1\")\n print(duckdb_conn.fetchall())\n\n\ndef numeric_check_or_pushdown(tbl_name):\n duck_tbl = duckdb_conn.table(tbl_name)\n arrow_table = duck_tbl.arrow()\n\n arrow_tbl_name = \"testarrow_\" + tbl_name\n duckdb_conn.register(arrow_tbl_name ,arrow_table)\n\n # Multiple column in the root OR node, don't push down\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a=1 OR b=2 AND (a>3 OR b<5)\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters:.*\", query_res[0][1])\n assert not match\n\n # Single column in the root OR node\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a=1 OR a=10\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters: a=1 OR a=10.*|$\", query_res[0][1])\n assert match\n\n # Single column + root OR node with AND\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a=1 OR (a>3 AND a<5)\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters: a=1 OR a>3 AND a<5.*|$\", query_res[0][1])\n assert match\n\n # Single column multiple ORs\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a=1 OR a>3 OR a<5\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters: a=1 OR a>3 OR a<5.*|$\", query_res[0][1])\n assert match\n\n # Testing not equal\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a!=1 OR a>3 OR a<2\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters: a!=1 OR a>3 OR a<2.*|$\", query_res[0][1])\n assert match\n\n # Multiple OR filters connected with 
ANDs\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE (a<2 OR a>3) AND (a=1 OR a=4) AND (b=1 OR b<5)\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters: a<2 OR a>3 AND a=1|\\n.*OR a=4.*\\n.*b=2 OR b<5.*|$\", query_res[0][1])\n assert match\n\n\ndef string_check_or_pushdown(tbl_name):\n duck_tbl = duckdb_conn.table(tbl_name)\n arrow_table = duck_tbl.arrow()\n\n arrow_tbl_name = \"testarrow_varchar\"\n duckdb_conn.register(arrow_tbl_name ,arrow_table)\n\n # Check string zonemap\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a>='1' OR a<='10'\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters: a>=1 OR a<=10.*|$\", query_res[0][1])\n assert match\n\n # No support for OR with is null\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a IS NULL or a='1'\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters:.*\", query_res[0][1])\n assert not match\n\n # No support for OR with is not null\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a IS NOT NULL OR a='1'\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters:.*\", query_res[0][1])\n assert not match\n\n # OR with the like operator\n query_res = duckdb_conn.execute(\"EXPLAIN SELECT * FROM \" +arrow_tbl_name+ \" WHERE a=1 OR a LIKE '10%'\").fetchall()\n match = re.search(\".*ARROW_SCAN.*Filters:.*\", query_res[0][1])\n assert not match\n\n\nclass TestArrowFilterPushdown(object):\n def test_filter_pushdown_numeric(self,duckdb_cursor):\n if not can_run:\n return\n\n numeric_types = ['TINYINT', 'SMALLINT', 'INTEGER', 'BIGINT', 'UTINYINT', 'USMALLINT', 'UINTEGER', 'UBIGINT',\n 'FLOAT', 'DOUBLE', 'HUGEINT']\n for data_type in numeric_types:\n tbl_name = \"test_\" + data_type\n numeric_operators(data_type, tbl_name)\n numeric_check_or_pushdown(tbl_name)\n\n def test_filter_pushdown_decimal(self,duckdb_cursor):\n if not can_run:\n return\n numeric_types = {'DECIMAL(4,1)': 'test_decimal_4_1', 'DECIMAL(9,1)': 'test_decimal_9_1',\n 'DECIMAL(18,4)': 'test_decimal_18_4','DECIMAL(30,12)': 'test_decimal_30_12'}\n for data_type in numeric_types:\n tbl_name = numeric_types[data_type]\n numeric_operators(data_type, tbl_name)\n numeric_check_or_pushdown(tbl_name)\n\n def test_filter_pushdown_varchar(self,duckdb_cursor):\n if not can_run:\n return\n duckdb_conn.execute(\"CREATE TABLE test_varchar (a VARCHAR, b VARCHAR, c VARCHAR)\")\n duckdb_conn.execute(\"INSERT INTO test_varchar VALUES ('1','1','1'),('10','10','10'),('100','10','100'),(NULL,NULL,NULL)\")\n duck_tbl = duckdb_conn.table(\"test_varchar\")\n arrow_table = duck_tbl.arrow()\n\n duckdb_conn.register(\"testarrow\",arrow_table)\n # Try ==\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='1'\").fetchone()[0] == 1\n # Try >\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >'1'\").fetchone()[0] == 2\n # Try >=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >='10'\").fetchone()[0] == 2\n # Try <\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <'10'\").fetchone()[0] == 1\n # Try <=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <='10'\").fetchone()[0] == 2\n\n # Try Is Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NULL\").fetchone()[0] == 1\n # Try Is Not Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NOT NULL\").fetchone()[0] == 3\n\n # Try And\n assert 
duckdb_conn.execute(\"SELECT count(*) from testarrow where a='10' and b ='1'\").fetchone()[0] == 0\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='100' and b = '10' and c = '100'\").fetchone()[0] == 1\n # Try Or\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a = '100' or b ='1'\").fetchone()[0] == 2\n\n # More complex tests for OR pushed down on string\n string_check_or_pushdown(\"test_varchar\")\n\n\n def test_filter_pushdown_bool(self,duckdb_cursor):\n if not can_run:\n return\n duckdb_conn.execute(\"CREATE TABLE test_bool (a BOOL, b BOOL)\")\n duckdb_conn.execute(\"INSERT INTO test_bool VALUES (TRUE,TRUE),(TRUE,FALSE),(FALSE,TRUE),(NULL,NULL)\")\n duck_tbl = duckdb_conn.table(\"test_bool\")\n arrow_table = duck_tbl.arrow()\n\n duckdb_conn.register(\"testarrow\",arrow_table)\n # Try ==\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a =True\").fetchone()[0] == 2\n\n # Try Is Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NULL\").fetchone()[0] == 1\n # Try Is Not Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NOT NULL\").fetchone()[0] == 3\n\n # Try And\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a=True and b =True\").fetchone()[0] == 1\n # Try Or\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a = True or b =True\").fetchone()[0] == 3\n\n def test_filter_pushdown_time(self,duckdb_cursor):\n if not can_run:\n return\n duckdb_conn.execute(\"CREATE TABLE test_time (a TIME, b TIME, c TIME)\")\n duckdb_conn.execute(\"INSERT INTO test_time VALUES ('00:01:00','00:01:00','00:01:00'),('00:10:00','00:10:00','00:10:00'),('01:00:00','00:10:00','01:00:00'),(NULL,NULL,NULL)\")\n duck_tbl = duckdb_conn.table(\"test_time\")\n arrow_table = duck_tbl.arrow()\n\n duckdb_conn.register(\"testarrow\",arrow_table)\n # Try ==\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='00:01:00'\").fetchone()[0] == 1\n # Try >\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >'00:01:00'\").fetchone()[0] == 2\n # Try >=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >='00:10:00'\").fetchone()[0] == 2\n # Try <\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <'00:10:00'\").fetchone()[0] == 1\n # Try <=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <='00:10:00'\").fetchone()[0] == 2\n\n # Try Is Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NULL\").fetchone()[0] == 1\n # Try Is Not Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NOT NULL\").fetchone()[0] == 3\n\n # Try And\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a='00:10:00' and b ='00:01:00'\").fetchone()[0] == 0\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='01:00:00' and b = '00:10:00' and c = '01:00:00'\").fetchone()[0] == 1\n # Try Or\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a = '01:00:00' or b ='00:01:00'\").fetchone()[0] == 2\n\n def test_filter_pushdown_timestamp(self,duckdb_cursor):\n if not can_run:\n return\n duckdb_conn.execute(\"CREATE TABLE test_timestamp (a TIMESTAMP, b TIMESTAMP, c TIMESTAMP)\")\n duckdb_conn.execute(\"INSERT INTO test_timestamp VALUES ('2008-01-01 00:00:01','2008-01-01 00:00:01','2008-01-01 00:00:01'),('2010-01-01 10:00:01','2010-01-01 10:00:01','2010-01-01 10:00:01'),('2020-03-01 10:00:01','2010-01-01 10:00:01','2020-03-01 
10:00:01'),(NULL,NULL,NULL)\")\n duck_tbl = duckdb_conn.table(\"test_timestamp\")\n arrow_table = duck_tbl.arrow()\n print (arrow_table)\n\n duckdb_conn.register(\"testarrow\",arrow_table)\n # Try ==\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='2008-01-01 00:00:01'\").fetchone()[0] == 1\n # Try >\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >'2008-01-01 00:00:01'\").fetchone()[0] == 2\n # Try >=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >='2010-01-01 10:00:01'\").fetchone()[0] == 2\n # Try <\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <'2010-01-01 10:00:01'\").fetchone()[0] == 1\n # Try <=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <='2010-01-01 10:00:01'\").fetchone()[0] == 2\n\n # Try Is Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NULL\").fetchone()[0] == 1\n # Try Is Not Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NOT NULL\").fetchone()[0] == 3\n\n # Try And\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a='2010-01-01 10:00:01' and b ='2008-01-01 00:00:01'\").fetchone()[0] == 0\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='2020-03-01 10:00:01' and b = '2010-01-01 10:00:01' and c = '2020-03-01 10:00:01'\").fetchone()[0] == 1\n # Try Or\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a = '2020-03-01 10:00:01' or b ='2008-01-01 00:00:01'\").fetchone()[0] == 2\n\n def test_filter_pushdown_timestamp_TZ(self,duckdb_cursor):\n if not can_run:\n return\n duckdb_conn.execute(\"CREATE TABLE test_timestamptz (a TIMESTAMPTZ, b TIMESTAMPTZ, c TIMESTAMPTZ)\")\n duckdb_conn.execute(\"INSERT INTO test_timestamptz VALUES ('2008-01-01 00:00:01','2008-01-01 00:00:01','2008-01-01 00:00:01'),('2010-01-01 10:00:01','2010-01-01 10:00:01','2010-01-01 10:00:01'),('2020-03-01 10:00:01','2010-01-01 10:00:01','2020-03-01 10:00:01'),(NULL,NULL,NULL)\")\n duck_tbl = duckdb_conn.table(\"test_timestamptz\")\n arrow_table = duck_tbl.arrow()\n print (arrow_table)\n\n duckdb_conn.register(\"testarrow\",arrow_table)\n # Try ==\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='2008-01-01 00:00:01'\").fetchone()[0] == 1\n # Try >\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >'2008-01-01 00:00:01'\").fetchone()[0] == 2\n # Try >=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >='2010-01-01 10:00:01'\").fetchone()[0] == 2\n # Try <\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <'2010-01-01 10:00:01'\").fetchone()[0] == 1\n # Try <=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <='2010-01-01 10:00:01'\").fetchone()[0] == 2\n\n # Try Is Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NULL\").fetchone()[0] == 1\n # Try Is Not Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NOT NULL\").fetchone()[0] == 3\n\n # Try And\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a='2010-01-01 10:00:01' and b ='2008-01-01 00:00:01'\").fetchone()[0] == 0\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='2020-03-01 10:00:01' and b = '2010-01-01 10:00:01' and c = '2020-03-01 10:00:01'\").fetchone()[0] == 1\n # Try Or\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a = '2020-03-01 10:00:01' or b ='2008-01-01 00:00:01'\").fetchone()[0] == 2\n\n\n def 
test_filter_pushdown_date(self,duckdb_cursor):\n if not can_run:\n return\n duckdb_conn.execute(\"CREATE TABLE test_date (a DATE, b DATE, c DATE)\")\n duckdb_conn.execute(\"INSERT INTO test_date VALUES ('2000-01-01','2000-01-01','2000-01-01'),('2000-10-01','2000-10-01','2000-10-01'),('2010-01-01','2000-10-01','2010-01-01'),(NULL,NULL,NULL)\")\n duck_tbl = duckdb_conn.table(\"test_date\")\n arrow_table = duck_tbl.arrow()\n\n duckdb_conn.register(\"testarrow\",arrow_table)\n # Try ==\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='2000-01-01'\").fetchone()[0] == 1\n # Try >\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >'2000-01-01'\").fetchone()[0] == 2\n # Try >=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a >='2000-10-01'\").fetchone()[0] == 2\n # Try <\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <'2000-10-01'\").fetchone()[0] == 1\n # Try <=\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a <='2000-10-01'\").fetchone()[0] == 2\n\n # Try Is Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NULL\").fetchone()[0] == 1\n # Try Is Not Null\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a IS NOT NULL\").fetchone()[0] == 3\n\n # Try And\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a='2000-10-01' and b ='2000-01-01'\").fetchone()[0] == 0\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a ='2010-01-01' and b = '2000-10-01' and c = '2010-01-01'\").fetchone()[0] == 1\n # Try Or\n assert duckdb_conn.execute(\"SELECT count(*) from testarrow where a = '2010-01-01' or b ='2000-01-01'\").fetchone()[0] == 2\n\n\n def test_filter_pushdown_no_projection(self,duckdb_cursor):\n if not can_run:\n return\n duckdb_conn.execute(\"CREATE TABLE test_int (a INTEGER, b INTEGER, c INTEGER)\")\n duckdb_conn.execute(\"INSERT INTO test_int VALUES (1,1,1),(10,10,10),(100,10,100),(NULL,NULL,NULL)\")\n duck_tbl = duckdb_conn.table(\"test_int\")\n arrow_table = duck_tbl.arrow()\n duckdb_conn.register(\"testarrowtable\",arrow_table)\n assert duckdb_conn.execute(\"SELECT * FROM testarrowtable VALUES where a =1\").fetchall() == [(1, 1, 1)]\n arrow_dataset = ds.dataset(arrow_table)\n duckdb_conn.register(\"testarrowdataset\",arrow_dataset)\n assert duckdb_conn.execute(\"SELECT * FROM testarrowdataset VALUES where a =1\").fetchall() == [(1, 1, 1)]\n\n def test_filter_pushdown_2145(self,duckdb_cursor):\n if not can_run:\n return\n\n date1 = pd.date_range(\"2018-01-01\", \"2018-12-31\", freq=\"B\")\n df1 = pd.DataFrame(np.random.randn(date1.shape[0], 5), columns=list(\"ABCDE\"))\n df1[\"date\"] = date1\n\n date2 = pd.date_range(\"2019-01-01\", \"2019-12-31\", freq=\"B\")\n df2 = pd.DataFrame(np.random.randn(date2.shape[0], 5), columns=list(\"ABCDE\"))\n df2[\"date\"] = date2\n\n pq.write_table(pa.table(df1), \"data1.parquet\")\n pq.write_table(pa.table(df2), \"data2.parquet\")\n\n table = pq.ParquetDataset([\"data1.parquet\", \"data2.parquet\"]).read()\n\n con = duckdb.connect()\n con.register(\"testarrow\",table)\n\n output_df = duckdb.arrow(table).filter(\"date > '2019-01-01'\").df()\n expected_df = duckdb.from_parquet(\"data*.parquet\").filter(\"date > '2019-01-01'\").df()\n pd.testing.assert_frame_equal(expected_df, output_df)\n\n os.remove(\"data1.parquet\")\n os.remove(\"data2.parquet\")\n"
] |
[
[
"numpy.random.randn",
"pandas.testing.assert_frame_equal",
"pandas.date_range"
]
] |
sanyabt/napdi-kg
|
[
"fa2f1ac0da13fceb88fd99a5832e0be5c57f88a1"
] |
[
"machine_read_scripts/reachJSONextraction.py"
] |
[
"import os, sys\nfrom indra.statements import stmts_from_json_file, stmts_to_json_file\nimport pandas as pd\nfrom indra.ontology.bio import bio_ontology\nimport indra.tools.assemble_corpus as ac\nimport re\nimport pickle\nimport requests\nfrom datetime import datetime, timedelta\nimport time\nfrom indra.sources import reach\n#####Potentially can use iDISK terminology for mapping here\n\npredicates_exclude = ['Agent', 'Conversion', 'Complex', 'SelfModification', 'ActiveForm' 'Gef', 'Gap', 'Autophosphorylation', 'Translocation', 'Transphosphorylation']\nworkingDir = os.getcwd()\nreachDir = workingDir + '/output_files/'\nreach_file = sys.argv[1]\nlogDir = workingDir + '/logs/'\noutputDir = reachDir + 'greentea/'\nfile_umls = workingDir + '/output_files/umls_dict_20211004.pickle'\nsave_umls = True\nlogging = True\n\ncount_dict = {\n\t'n_statements': 0,\n\t'n_statements_extracted': 0,\n\t'n_entities_total': 0,\n\t'n_entities_mapped': 0\n}\nassemble = True\npub_year_to_pmid_map = {}\n\n'''\nSteps to be modified based on use case and required output and processing\n'''\n#Also figure out how to include heuristics in map_grounding: \n#https://indra.readthedocs.io/en/latest/modules/tools/index.html?highlight=run_preassembly#indra.tools.assemble_corpus.map_grounding\ndef run_assembly_pipeline(statements):\n\t#statements = ac.filter_grounded_only(statements) # Filter out ungrounded agents\n\tstatements = ac.filter_no_hypothesis(statements) # Filter out hypothetical statements\n\tstatements = ac.map_grounding(statements, gilda_mode='local') # Map grounding\n\t#statements = ac.filter_human_only(statements) # Filter out non-human genes\n\t#statements = ac.map_sequence(statements) # Map sequence\n\tstatements = ac.run_preassembly(statements,\n\t\treturn_toplevel=False,\n\t\tontology=bio_ontology,\n\t\tnormalize_equivalences=True, # Optional: rewrite equivalent groundings to one standard\n\t\tnormalize_opposites=True, # Optional: rewrite opposite groundings to one standard\n\t\tnormalize_ns='OBO') # WM = world modelers, OBO = Bio_Ontology\n\t#make this true later\n\t#statements = ac.filter_belief(statements, 0.8) # Apply belief cutoff of 0.8\n\treturn statements\n\ndef get_publication_year(pmid):\n\tif pmid == '':\n\t\treturn ''\n\tif pmid in pub_year_to_pmid_map:\n\t\tif pub_year_to_pmid_map[pmid] != '':\n\t\t\treturn pub_year_to_pmid_map[pmid]\n\ttime.sleep(5)\n\turi = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&id=\"+pmid+\"&retmode=json\"\n\tpub_year = ''\n\tresponse = requests.get(uri)\n\tif response.status_code == 429:\n\t\ttime.sleep(5)\n\t\tresponse = requests.get(uri)\n\tif response.status_code == 200:\n\t\tresult = response.json()\n\t\tpub_year = result['result'][pmid]['pubdate']\n\t\tpub_year_to_pmid_map[pmid] = pub_year\n\treturn pub_year\n\ndef get_umls_concepts(subject_source, object_source, umls_dict, subj_mapped, obj_mapped):\n\tcount_dict['n_entities_total'] += 2\n\n#(replace spaces with underscores of preferred names in the UMLS)\n\tsubject_dict = {}\n\tobject_dict = {}\n\tif subject_source in umls_dict:\n\t\tsubject_dict['cui'] = umls_dict[subject_source]['cui']\n\t\tname = umls_dict[subject_source]['umls_term']\n\t\tsubject_dict['name'] = \"_\".join(name.split())\n\t\tsubject_dict['type'] = umls_dict[subject_source]['sem_type']\n\t\tsubject_dict['score'] = umls_dict[subject_source]['score']\n\t\tcount_dict['n_entities_mapped'] += 1\n\telif subj_mapped in umls_dict:\n\t\tsubject_dict['cui'] = umls_dict[subj_mapped]['cui']\n\t\tname = 
umls_dict[subj_mapped]['umls_term']\n\t\tsubject_dict['name'] = \"_\".join(name.split())\n\t\tsubject_dict['type'] = umls_dict[subj_mapped]['sem_type']\n\t\tsubject_dict['score'] = umls_dict[subj_mapped]['score']\n\t\tcount_dict['n_entities_mapped'] += 1\n\telse:\n\t\tsubject_dict['cui'] = ''\n\t\tsubject_dict['name'] = ''\n\t\tsubject_dict['type'] = ''\n\t\tsubject_dict['score'] = ''\n\n\tif object_source in umls_dict:\n\t\tobject_dict['cui'] = umls_dict[object_source]['cui']\n\t\tname = umls_dict[object_source]['umls_term']\n\t\tobject_dict['name'] = \"_\".join(name.split())\n\t\tobject_dict['type'] = umls_dict[object_source]['sem_type']\n\t\tobject_dict['score'] = umls_dict[object_source]['score']\n\t\tcount_dict['n_entities_mapped'] += 1\n\telif obj_mapped in umls_dict:\n\t\tobject_dict['cui'] = umls_dict[obj_mapped]['cui']\n\t\tname = umls_dict[obj_mapped]['umls_term']\n\t\tobject_dict['name'] = \"_\".join(name.split())\n\t\tobject_dict['type'] = umls_dict[obj_mapped]['sem_type']\n\t\tobject_dict['score'] = umls_dict[obj_mapped]['score']\n\t\tcount_dict['n_entities_mapped'] += 1\n\telse:\n\t\tobject_dict['cui'] = ''\n\t\tobject_dict['name'] = ''\n\t\tobject_dict['type'] = ''\n\t\tobject_dict['score'] = '' \n\n\treturn subject_dict, object_dict\n\ndef extract_statements(file_path, umls_dict):\n\n\tresult_dict = {\n\t\t'seq': [],\n\t\t'pmid': [],\n\t\t'subject_source': [],\n\t\t'object_source': [],\n\t\t'belief': [],\n\t\t'predicate': [],\n\t\t'sentence': [],\n\t\t'subj_map_reach': [],\n\t\t'obj_map_reach': [],\n\t\t'year': [],\n\t\t'subject_cui': [],\n\t\t'object_cui': [],\n\t\t'subject_name': [],\n\t\t'object_name': [],\n\t\t'subject_type': [],\n\t\t'object_type': [],\n\t\t'subject_score': [],\n\t\t'object_score': [],\n\t\t'umls_flag': [],\n\t\t'subj_reach_grounding': [],\n\t\t'obj_reach_grounding': []\n\t}\n\n\tstmts = stmts_from_json_file(file_path)\n\tcount_dict['n_statements'] += len(stmts)\n\tseq = 0\n\tfor statement in stmts:\n\t\tpredicate = type(statement).__name__\n\t\tfields = dir(statement.evidence[0])\n\t\tif predicate not in predicates_exclude:\n\t\t\ttry:\n\t\t\t\tresult_dict['seq'].append(seq)\n\t\t\t\tif 'pmid' in fields:\n\t\t\t\t\tpmid = statement.evidence[0].pmid\n\t\t\t\telse:\n\t\t\t\t\tpmid = ''\n\t\t\t\tresult_dict['pmid'].append(pmid)\n\n\t\t\t\tif 'subj' in dir(statement):\n\t\t\t\t\tif statement.subj:\n\t\t\t\t\t\tsubject_source = str(statement.subj.name)\n\t\t\t\t\t\tif statement.subj.db_refs:\n\t\t\t\t\t\t\tif 'TEXT' in statement.subj.db_refs:\n\t\t\t\t\t\t\t\tsubject_source_text = str(statement.subj.db_refs['TEXT'])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tsubject_source_text = subject_source\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsubject_source_text = subject_source\n\t\t\t\t\telse:\n\t\t\t\t\t\tsubject_source = ''\n\t\t\t\t\t\tsubject_source_text = ''\n\t\t\t\t\tif statement.obj:\n\t\t\t\t\t\tobject_source = str(statement.obj.name)\n\t\t\t\t\t\tif statement.obj.db_refs:\n\t\t\t\t\t\t\tif 'TEXT' in statement.obj.db_refs:\n\t\t\t\t\t\t\t\tobject_source_text = str(statement.obj.db_refs['TEXT'])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tobject_source_text = object_source\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tobject_source_text = object_source\n\t\t\t\t\telse:\n\t\t\t\t\t\tobject_source = ''\n\t\t\t\t\t\tobject_source_text = ''\n\t\t\t\telif 'enz' in dir(statement):\n\t\t\t\t\tif statement.enz:\n\t\t\t\t\t\tsubject_source = str(statement.enz.name)\n\t\t\t\t\t\tif statement.enz.db_refs:\n\t\t\t\t\t\t\tif 'TEXT' in statement.enz.db_refs:\n\t\t\t\t\t\t\t\tsubject_source_text = 
str(statement.enz.db_refs['TEXT'])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tsubject_source_text = subject_source\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsubject_source_text = subject_source\n\t\t\t\t\telse:\n\t\t\t\t\t\tsubject_source = ''\n\t\t\t\t\t\tsubject_source_text = ''\n\t\t\t\t\tif statement.sub:\n\t\t\t\t\t\tobject_source = str(statement.sub.name)\n\t\t\t\t\t\tif statement.sub.db_refs:\n\t\t\t\t\t\t\tif 'TEXT' in statement.sub.db_refs:\n\t\t\t\t\t\t\t\tobject_source_text = str(statement.sub.db_refs['TEXT'])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tobject_source_text = object_source\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tobject_source_text = object_source\n\t\t\t\t\telse:\n\t\t\t\t\t\tobject_source = ''\n\t\t\t\t\t\tobject_source_text = ''\n\t\t\t\telse:\n\t\t\t\t\tsubject_source = ''\n\t\t\t\t\tobject_source = ''\n\t\t\t\t\tsubject_source_text = ''\n\t\t\t\t\tobject_source_text = ''\n\t\t\t\tsubject_source = re.sub(r'\\(|\\)', '', subject_source)\n\t\t\t\tsubject_source = re.sub(r'[^\\x00-\\x7F]+',' ', subject_source)\n\t\t\t\tobject_source = re.sub(r'\\(|\\)', '', object_source)\n\t\t\t\tobject_source = re.sub(r'[^\\x00-\\x7F]+',' ', object_source)\n\t\t\t\tsubject_source = subject_source.strip()\n\t\t\t\tobject_source = object_source.strip()\n\t\t\t\tresult_dict['subject_source'].append(subject_source)\n\t\t\t\tresult_dict['object_source'].append(object_source)\n\t\t\t\tresult_dict['belief'].append(statement.belief)\n\t\t\t\tresult_dict['predicate'].append(predicate)\n\t\t\t\tresult_dict['sentence'].append(statement.evidence[0].text)\n\n\t\t\t\t#query umls dictionary to get CUIs etc. Will save empty strings if unmapped\n\t\t\t\tif save_umls:\n\t\t\t\t\tsubject_source_text = re.sub(r'\\(|\\)', '', subject_source_text)\n\t\t\t\t\tsubject_source_text = re.sub(r'[^\\x00-\\x7F]+',' ', subject_source_text)\n\t\t\t\t\tobject_source_text = re.sub(r'\\(|\\)', '', object_source_text)\n\t\t\t\t\tobject_source_text = re.sub(r'[^\\x00-\\x7F]+',' ', object_source_text)\n\t\t\t\t\tsubject_source_text = subject_source_text.strip()\n\t\t\t\t\tobject_source_text = object_source_text.strip()\n\t\t\t\t\tsubj, obj = get_umls_concepts(subject_source_text, object_source_text, umls_dict, subject_source, object_source)\n\t\t\t\t\tresult_dict['subject_cui'].append(subj['cui'])\n\t\t\t\t\tresult_dict['subject_name'].append(subj['name'])\n\t\t\t\t\tresult_dict['subject_type'].append(subj['type'])\n\t\t\t\t\tresult_dict['subject_score'].append(subj['score'])\n\t\t\t\t\tresult_dict['object_cui'].append(obj['cui'])\n\t\t\t\t\tresult_dict['object_name'].append(obj['name'])\n\t\t\t\t\tresult_dict['object_type'].append(obj['type'])\n\t\t\t\t\tresult_dict['object_score'].append(obj['score'])\n\n\t\t\t\tif result_dict['subject_cui']:\n\t\t\t\t\tresult_dict['umls_flag'].append(1)\n\t\t\t\telse:\n\t\t\t\t\tresult_dict['umls_flag'].append(0)\n\n\t\t\t\tgroundings = statement.evidence[0].annotations['agents']['raw_grounding']\n\t\t\t\tif len(groundings):\n\t\t\t\t\tresult_dict['subj_map_reach'].append(groundings[0])\n\t\t\t\t\tif len(groundings) == 2:\n\t\t\t\t\t\tresult_dict['obj_map_reach'].append(groundings[1])\n\t\t\t\t\n\t\t\t\tagent_list = statement.agent_list()\n\t\t\t\tif len(agent_list):\n\t\t\t\t\tif agent_list[0]:\n\t\t\t\t\t\tresult_dict['subj_reach_grounding'].append(agent_list[0].get_grounding())\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult_dict['subj_reach_grounding'].append('')\n\t\t\t\t\tif len(agent_list) == 2:\n\t\t\t\t\t\tif 
agent_list[1]:\n\t\t\t\t\t\t\tresult_dict['obj_reach_grounding'].append(agent_list[1].get_grounding())\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresult_dict['obj_reach_grounding'].append('')\n\t\t\t\telse:\n\t\t\t\t\tresult_dict['subj_reach_grounding'].append('')\n\t\t\t\t\tresult_dict['obj_reach_grounding'].append('')\n\n\t\t\t\tpub_year = get_publication_year(pmid)\n\t\t\t\tresult_dict['year'].append(pub_year)\n\t\t\t\tseq += 1\n\n\t\t\t\tcount_dict['n_statements_extracted'] += 1\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\n\treturn result_dict\n\nif __name__ == '__main__':\n\n\tt0=datetime.now()\n\tlog_file = open(logDir+'json_extract_log'+str(t0)+'.txt', 'a')\n\tlog_file.write('\\nStarting REACH files extraction')\n\n\tfile_d = open(file_umls, 'rb')\n\tumls_dict = pickle.load(file_d)\n\t#Option 1: use assembled file\n\tif '.' in reach_file:\n\t\treach_dict = extract_statements(reach_file, umls_dict)\n\t#Option 2: separate option for multiple files\n\telse:\n\t\treach_statements = []\n\t\treach_files = os.listdir(reach_file)\n\t\tfor file in reach_files:\n\t\t\tif file.endswith(\".json\"):\n\t\t\t\tpmid = file.split('_')[0]\n\t\t\t\tfilepath = reach_file+file\n\t\t\t\trp = reach.process_json_file(filepath, citation=pmid)\n\t\t\t\tif rp is not None:\n\t\t\t\t\treach_statements += rp.statements\n\t\treach_statements_assembled = run_assembly_pipeline(reach_statements)\n\t\toutJSONFname = reachDir + 'reach_output_assembly_all.json'\n\t\tstmts_to_json_file(reach_statements_assembled, outJSONFname)\n\t\treach_dict = extract_statements(outJSONFname, umls_dict)\n\n\treach_result = pd.DataFrame(data=reach_dict)\n\treach_result.to_csv(outputDir+'greentea_pmid_all_predicates_umls_20220112.tsv', sep='\\t', index=False,\n\t\t\t\t\tcolumns=['seq', 'pmid', 'subject_cui', 'subject_name', 'subject_type', 'subject_source', 'subj_map_reach',\n\t\t\t\t\t'predicate', 'object_source', 'object_cui', 'object_name', 'object_type', 'obj_map_reach', 'belief',\n\t\t\t\t\t'sentence', 'year', 'subject_score', 'object_score', 'umls_flag', 'subj_reach_grounding', 'obj_reach_grounding'])\n\tt1=datetime.now()\n\tseconds=timedelta.total_seconds(t1-t0)\n\tlog_file.write('\\nTotal time: '+ str(seconds)+' seconds')\n\t#print count dictionary to log file\n\tlog_file.write('\\nStatements: '+str(count_dict['n_statements']))\n\tlog_file.write('\\nStatements_extracted: '+str(count_dict['n_statements_extracted']))\n\tlog_file.write('\\nEntities: '+str(count_dict['n_entities_total']))\n\tlog_file.write('\\nEntities_mapped: '+str(count_dict['n_entities_mapped']))\n\n\n\n\n\n\n"
] |
[
[
"pandas.DataFrame"
]
] |
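The `get_umls_concepts` logic in the record above boils down to a lookup-with-fallback: try the agent's canonical name in the UMLS mapping first, then fall back to the mapped raw-text mention, and return empty strings when neither hits. A minimal standalone sketch of that pattern follows; the toy `umls_dict` entry and its CUI value are invented for illustration, not real UMLS data.

```python
# Sketch of the two-step UMLS lookup: canonical name first, then the
# mapped raw-text mention. The dictionary entry below is fabricated.

def lookup_umls(primary_key, fallback_key, umls_dict):
    for key in (primary_key, fallback_key):
        if key in umls_dict:
            entry = umls_dict[key]
            return {
                'cui': entry['cui'],
                'name': '_'.join(entry['umls_term'].split()),
                'type': entry['sem_type'],
                'score': entry['score'],
            }
    # Unmapped entities get empty strings, mirroring the else-branches above.
    return {'cui': '', 'name': '', 'type': '', 'score': ''}

umls_dict = {
    'EGCG': {'cui': 'C0000000',  # fake CUI, for illustration only
             'umls_term': 'epigallocatechin gallate',
             'sem_type': 'orch', 'score': 0.99},
}
print(lookup_umls('epigallocatechin-3-gallate', 'EGCG', umls_dict))
```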
Tarpelite/UniNLP
|
[
"176c2a0f88c8054bf69e1f92693d353737367c34"
] |
[
"examples/run_glue.py"
] |
[
"# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\r\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa).\"\"\"\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nimport argparse\r\nimport glob\r\nimport logging\r\nimport os\r\nimport random\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\r\n TensorDataset)\r\nfrom torch.utils.data.distributed import DistributedSampler\r\n\r\ntry:\r\n from torch.utils.tensorboard import SummaryWriter\r\nexcept:\r\n from tensorboardX import SummaryWriter\r\n\r\nfrom tqdm import tqdm, trange\r\n\r\nfrom transformers import (WEIGHTS_NAME, BertConfig,\r\n BertForSequenceClassification, BertTokenizer,\r\n RobertaConfig,\r\n RobertaForSequenceClassification,\r\n RobertaTokenizer,\r\n XLMConfig, XLMForSequenceClassification,\r\n XLMTokenizer, XLNetConfig,\r\n XLNetForSequenceClassification,\r\n XLNetTokenizer,\r\n DistilBertConfig,\r\n DistilBertForSequenceClassification,\r\n DistilBertTokenizer)\r\n\r\nfrom transformers import AdamW, get_linear_schedule_with_warmup\r\n\r\nfrom transformers import glue_compute_metrics as compute_metrics\r\nfrom transformers import glue_output_modes as output_modes\r\nfrom transformers import glue_processors as processors\r\nfrom transformers import glue_convert_examples_to_features as convert_examples_to_features\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig, \r\n RobertaConfig, DistilBertConfig)), ())\r\n\r\nMODEL_CLASSES = {\r\n 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),\r\n 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),\r\n 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),\r\n 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),\r\n 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)\r\n}\r\n\r\n\r\ndef set_seed(args):\r\n random.seed(args.seed)\r\n np.random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n if args.n_gpu > 0:\r\n torch.cuda.manual_seed_all(args.seed)\r\n\r\n\r\ndef train(args, train_dataset, model, tokenizer):\r\n \"\"\" Train the model \"\"\"\r\n if args.local_rank in [-1, 0]:\r\n tb_writer = SummaryWriter()\r\n\r\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\r\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\r\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\r\n\r\n if args.max_steps > 0:\r\n t_total = args.max_steps\r\n args.num_train_epochs = args.max_steps // 
(len(train_dataloader) // args.gradient_accumulation_steps) + 1\r\n else:\r\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\r\n\r\n # Prepare optimizer and schedule (linear warmup and decay)\r\n no_decay = ['bias', 'LayerNorm.weight']\r\n optimizer_grouped_parameters = [\r\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\r\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\r\n ]\r\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\r\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)\r\n if args.fp16:\r\n try:\r\n from apex import amp\r\n except ImportError:\r\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\r\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\r\n\r\n # multi-gpu training (should be after apex fp16 initialization)\r\n if args.n_gpu > 1:\r\n model = torch.nn.DataParallel(model)\r\n\r\n # Distributed training (should be after apex fp16 initialization)\r\n if args.local_rank != -1:\r\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n find_unused_parameters=True)\r\n\r\n # Train!\r\n logger.info(\"***** Running training *****\")\r\n logger.info(\" Num examples = %d\", len(train_dataset))\r\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\r\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\r\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\r\n args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))\r\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\r\n logger.info(\" Total optimization steps = %d\", t_total)\r\n\r\n global_step = 0\r\n tr_loss, logging_loss = 0.0, 0.0\r\n model.zero_grad()\r\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\r\n set_seed(args) # Added here for reproductibility (even between python 2 and 3)\r\n for _ in train_iterator:\r\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\r\n for step, batch in enumerate(epoch_iterator):\r\n model.train()\r\n batch = tuple(t.to(args.device) for t in batch)\r\n inputs = {'input_ids': batch[0],\r\n 'attention_mask': batch[1],\r\n 'labels': batch[3]}\r\n if args.model_type != 'distilbert':\r\n inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids\r\n outputs = model(**inputs)\r\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\r\n\r\n if args.n_gpu > 1:\r\n loss = loss.mean() # mean() to average on multi-gpu parallel training\r\n if args.gradient_accumulation_steps > 1:\r\n loss = loss / args.gradient_accumulation_steps\r\n\r\n if args.fp16:\r\n with amp.scale_loss(loss, optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n else:\r\n loss.backward()\r\n\r\n tr_loss += loss.item()\r\n if (step + 1) % args.gradient_accumulation_steps == 0:\r\n if args.fp16:\r\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\r\n else:\r\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\r\n\r\n optimizer.step()\r\n scheduler.step() # Update learning rate schedule\r\n model.zero_grad()\r\n global_step += 1\r\n\r\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\r\n # Log metrics\r\n if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well\r\n results = evaluate(args, model, tokenizer)\r\n for key, value in results.items():\r\n tb_writer.add_scalar('eval_{}'.format(key), value, global_step)\r\n tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)\r\n tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)\r\n logging_loss = tr_loss\r\n\r\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\r\n # Save model checkpoint\r\n output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))\r\n if not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\r\n model_to_save.save_pretrained(output_dir)\r\n torch.save(args, os.path.join(output_dir, 'training_args.bin'))\r\n logger.info(\"Saving model checkpoint to %s\", output_dir)\r\n\r\n if args.max_steps > 0 and global_step > args.max_steps:\r\n epoch_iterator.close()\r\n break\r\n if args.max_steps > 0 and global_step > args.max_steps:\r\n train_iterator.close()\r\n break\r\n\r\n if args.local_rank in [-1, 0]:\r\n tb_writer.close()\r\n\r\n return global_step, tr_loss / global_step\r\n\r\n\r\ndef evaluate(args, model, tokenizer, prefix=\"\"):\r\n # Loop to handle MNLI 
double evaluation (matched, mis-matched)\r\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\r\n eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == \"mnli\" else (args.output_dir,)\r\n\r\n results = {}\r\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\r\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\r\n\r\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\r\n os.makedirs(eval_output_dir)\r\n\r\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\r\n # Note that DistributedSampler samples randomly\r\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\r\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\r\n\r\n # multi-gpu eval\r\n if args.n_gpu > 1:\r\n model = torch.nn.DataParallel(model)\r\n\r\n # Eval!\r\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\r\n logger.info(\" Num examples = %d\", len(eval_dataset))\r\n logger.info(\" Batch size = %d\", args.eval_batch_size)\r\n eval_loss = 0.0\r\n nb_eval_steps = 0\r\n preds = None\r\n out_label_ids = None\r\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\r\n model.eval()\r\n batch = tuple(t.to(args.device) for t in batch)\r\n\r\n with torch.no_grad():\r\n inputs = {'input_ids': batch[0],\r\n 'attention_mask': batch[1],\r\n 'labels': batch[3]}\r\n if args.model_type != 'distilbert':\r\n inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids\r\n outputs = model(**inputs)\r\n tmp_eval_loss, logits = outputs[:2]\r\n\r\n eval_loss += tmp_eval_loss.mean().item()\r\n nb_eval_steps += 1\r\n if preds is None:\r\n preds = logits.detach().cpu().numpy()\r\n out_label_ids = inputs['labels'].detach().cpu().numpy()\r\n else:\r\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\r\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\r\n\r\n eval_loss = eval_loss / nb_eval_steps\r\n if args.output_mode == \"classification\":\r\n preds = np.argmax(preds, axis=1)\r\n elif args.output_mode == \"regression\":\r\n preds = np.squeeze(preds)\r\n result = compute_metrics(eval_task, preds, out_label_ids)\r\n results.update(result)\r\n\r\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\r\n with open(output_eval_file, \"w\") as writer:\r\n logger.info(\"***** Eval results {} *****\".format(prefix))\r\n for key in sorted(result.keys()):\r\n logger.info(\" %s = %s\", key, str(result[key]))\r\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\r\n\r\n return results\r\n\r\n\r\ndef load_and_cache_examples(args, task, tokenizer, evaluate=False):\r\n if args.local_rank not in [-1, 0] and not evaluate:\r\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\r\n\r\n processor = processors[task]()\r\n output_mode = output_modes[task]\r\n # Load data features from cache or dataset file\r\n cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(\r\n 'dev' if evaluate else 'train',\r\n list(filter(None, args.model_name_or_path.split('/'))).pop(),\r\n str(args.max_seq_length),\r\n str(task)))\r\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\r\n 
logger.info(\"Loading features from cached file %s\", cached_features_file)\r\n features = torch.load(cached_features_file)\r\n else:\r\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\r\n label_list = processor.get_labels()\r\n if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:\r\n # HACK(label indices are swapped in RoBERTa pretrained model)\r\n label_list[1], label_list[2] = label_list[2], label_list[1] \r\n examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)\r\n features = convert_examples_to_features(examples,\r\n tokenizer,\r\n label_list=label_list,\r\n max_length=args.max_seq_length,\r\n output_mode=output_mode,\r\n pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet\r\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\r\n pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,\r\n )\r\n if args.local_rank in [-1, 0]:\r\n logger.info(\"Saving features into cached file %s\", cached_features_file)\r\n torch.save(features, cached_features_file)\r\n\r\n if args.local_rank == 0 and not evaluate:\r\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\r\n\r\n # Convert to Tensors and build dataset\r\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\r\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\r\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\r\n if output_mode == \"classification\":\r\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\r\n elif output_mode == \"regression\":\r\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\r\n\r\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\r\n return dataset\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n\r\n ## Required parameters\r\n parser.add_argument(\"--data_dir\", default=None, type=str, required=True,\r\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\")\r\n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\r\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()))\r\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\r\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS))\r\n parser.add_argument(\"--task_name\", default=None, type=str, required=True,\r\n help=\"The name of the task to train selected in the list: \" + \", \".join(processors.keys()))\r\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\r\n help=\"The output directory where the model predictions and checkpoints will be written.\")\r\n\r\n ## Other parameters\r\n parser.add_argument(\"--config_name\", default=\"\", type=str,\r\n help=\"Pretrained config name or path if not the same as model_name\")\r\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\r\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\r\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\r\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\r\n parser.add_argument(\"--max_seq_length\", default=128, type=int,\r\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\")\r\n parser.add_argument(\"--do_train\", action='store_true',\r\n help=\"Whether to run training.\")\r\n parser.add_argument(\"--do_eval\", action='store_true',\r\n help=\"Whether to run eval on the dev set.\")\r\n parser.add_argument(\"--evaluate_during_training\", action='store_true',\r\n help=\"Rul evaluation during training at each logging step.\")\r\n parser.add_argument(\"--do_lower_case\", action='store_true',\r\n help=\"Set this flag if you are using an uncased model.\")\r\n\r\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int,\r\n help=\"Batch size per GPU/CPU for training.\")\r\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=8, type=int,\r\n help=\"Batch size per GPU/CPU for evaluation.\")\r\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\r\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\r\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\r\n help=\"The initial learning rate for Adam.\")\r\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\r\n help=\"Weight deay if we apply some.\")\r\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\r\n help=\"Epsilon for Adam optimizer.\")\r\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\r\n help=\"Max gradient norm.\")\r\n parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\r\n help=\"Total number of training epochs to perform.\")\r\n parser.add_argument(\"--max_steps\", default=-1, type=int,\r\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\r\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\r\n help=\"Linear warmup over warmup_steps.\")\r\n\r\n parser.add_argument('--logging_steps', type=int, default=50,\r\n help=\"Log every X updates steps.\")\r\n parser.add_argument('--save_steps', type=int, default=50,\r\n help=\"Save checkpoint every X updates steps.\")\r\n parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\r\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\")\r\n parser.add_argument(\"--no_cuda\", action='store_true',\r\n help=\"Avoid using CUDA when available\")\r\n parser.add_argument('--overwrite_output_dir', action='store_true',\r\n help=\"Overwrite the content of the output directory\")\r\n parser.add_argument('--overwrite_cache', action='store_true',\r\n help=\"Overwrite the cached training and evaluation sets\")\r\n parser.add_argument('--seed', type=int, default=42,\r\n help=\"random seed for initialization\")\r\n\r\n parser.add_argument('--fp16', action='store_true',\r\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\")\r\n parser.add_argument('--fp16_opt_level', type=str, default='O1',\r\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\r\n \"See details at https://nvidia.github.io/apex/amp.html\")\r\n parser.add_argument(\"--local_rank\", type=int, default=-1,\r\n help=\"For distributed training: local_rank\")\r\n parser.add_argument('--server_ip', type=str, default='', help=\"For distant debugging.\")\r\n parser.add_argument('--server_port', type=str, default='', help=\"For distant debugging.\")\r\n args = parser.parse_args()\r\n\r\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\r\n raise ValueError(\"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(args.output_dir))\r\n\r\n # Setup distant debugging if needed\r\n if args.server_ip and args.server_port:\r\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\r\n import ptvsd\r\n print(\"Waiting for debugger attach\")\r\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\r\n ptvsd.wait_for_attach()\r\n\r\n # Setup CUDA, GPU & distributed training\r\n if args.local_rank == -1 or args.no_cuda:\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\r\n args.n_gpu = torch.cuda.device_count()\r\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\r\n torch.cuda.set_device(args.local_rank)\r\n device = torch.device(\"cuda\", args.local_rank)\r\n torch.distributed.init_process_group(backend='nccl')\r\n args.n_gpu = 1\r\n args.device = device\r\n\r\n # Setup logging\r\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\r\n datefmt = '%m/%d/%Y %H:%M:%S',\r\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\r\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\r\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)\r\n\r\n # Set seed\r\n set_seed(args)\r\n\r\n # Prepare GLUE task\r\n args.task_name = args.task_name.lower()\r\n if args.task_name not in processors:\r\n raise ValueError(\"Task not found: %s\" % (args.task_name))\r\n processor = processors[args.task_name]()\r\n args.output_mode = output_modes[args.task_name]\r\n label_list = processor.get_labels()\r\n num_labels = len(label_list)\r\n\r\n # Load pretrained model and tokenizer\r\n if args.local_rank not in [-1, 0]:\r\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\r\n\r\n args.model_type = args.model_type.lower()\r\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\r\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,\r\n num_labels=num_labels,\r\n finetuning_task=args.task_name,\r\n cache_dir=args.cache_dir if args.cache_dir else None)\r\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\r\n do_lower_case=args.do_lower_case,\r\n cache_dir=args.cache_dir if args.cache_dir else None)\r\n model = model_class.from_pretrained(args.model_name_or_path,\r\n from_tf=bool('.ckpt' in args.model_name_or_path),\r\n config=config,\r\n cache_dir=args.cache_dir if args.cache_dir else None)\r\n\r\n if args.local_rank == 0:\r\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\r\n\r\n model.to(args.device)\r\n\r\n logger.info(\"Training/evaluation parameters %s\", args)\r\n\r\n\r\n # Training\r\n if args.do_train:\r\n train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\r\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\r\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\r\n\r\n\r\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\r\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\r\n # Create output directory if needed\r\n if not 
os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\r\n os.makedirs(args.output_dir)\r\n\r\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\r\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\r\n # They can then be reloaded using `from_pretrained()`\r\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\r\n model_to_save.save_pretrained(args.output_dir)\r\n tokenizer.save_pretrained(args.output_dir)\r\n\r\n # Good practice: save your training arguments together with the trained model\r\n torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\r\n\r\n # Load a trained model and vocabulary that you have fine-tuned\r\n model = model_class.from_pretrained(args.output_dir)\r\n tokenizer = tokenizer_class.from_pretrained(args.output_dir)\r\n model.to(args.device)\r\n\r\n\r\n # Evaluation\r\n results = {}\r\n if args.do_eval and args.local_rank in [-1, 0]:\r\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\r\n checkpoints = [args.output_dir]\r\n if args.eval_all_checkpoints:\r\n checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))\r\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\r\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\r\n for checkpoint in checkpoints:\r\n global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else \"\"\r\n prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else \"\"\r\n \r\n model = model_class.from_pretrained(checkpoint)\r\n model.to(args.device)\r\n result = evaluate(args, model, tokenizer, prefix=prefix)\r\n result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())\r\n results.update(result)\r\n\r\n return results\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
] |
[
[
"torch.load",
"numpy.squeeze",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"torch.save",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.TensorDataset",
"torch.distributed.barrier",
"torch.tensor",
"numpy.argmax",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] |
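The `train()` function in the record above splits model parameters into two AdamW groups so that biases and LayerNorm weights are exempt from weight decay. Below is a self-contained sketch of that grouping with a toy module; it uses `torch.optim.AdamW` rather than the `transformers` `AdamW` plus linear-warmup schedule the script itself uses, and the module attribute is deliberately named `LayerNorm` (as in BERT) so the substring filter matches.

```python
# Sketch of the no-decay parameter grouping used in train() above.
import torch
from torch import nn

class Block(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)  # named as in BERT so the filter matches

model = Block()
no_decay = ['bias', 'LayerNorm.weight']
grouped = [
    {'params': [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},   # decayed: dense.weight
    {'params': [p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},    # exempt: dense.bias, LayerNorm.weight, LayerNorm.bias
]
optimizer = torch.optim.AdamW(grouped, lr=5e-5, eps=1e-8)
print([n for n, _ in model.named_parameters()])
```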
dstansby/solarsynoptic
|
[
"e8f2a4d58d8beb3fd5e8d85f6294148653e79e50"
] |
[
"aia_helpers.py"
] |
[
"from datetime import datetime, timedelta\nimport pathlib\nimport shutil\nimport urllib.request\nimport urllib.error\n\nfrom astropy.coordinates import SkyCoord, Longitude\nfrom astropy.time import Time\nimport astropy.units as u\nfrom astropy.wcs import WCS\nfrom reproject import reproject_interp\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sunpy.net import vso\nfrom sunpy.net import attrs as a\nfrom sunpy.net import Fido\nfrom sunpy.map import Map, make_fitswcs_header\nfrom sunpy.coordinates import get_earth\nimport sunpy.sun.constants\n\nfrom aiapy.calibrate import (update_pointing, fix_observer_location,\n correct_degradation, normalize_exposure)\nfrom aiapy.calibrate.util import get_correction_table\n\nfrom time_helpers import start_of_day\n\n\n# The directory in which maps are downloaded to and read from\nmap_dir = pathlib.Path('/Volumes/Work/Data/aia_new')\nif not map_dir.exists():\n raise RuntimeError(f'Map directory {map_dir} does not exist')\ncorrection_table = get_correction_table()\n\n\ndef map_path(dtime, wlen):\n \"\"\"\n Get the path of an AIA map at a given date and wavelength.\n \"\"\"\n datestr = dtime.strftime('%Y%m%d')\n return map_dir / f'aia_{wlen}_{datestr}.fits'\n\n\ndef synoptic_map_path(dtime, wlen):\n \"\"\"\n Get the path of an AIA synoptic map at a given date and wavelength.\n \"\"\"\n datestr = dtime.strftime('%Y%m%d')\n return map_dir / f'aia_{wlen}_synoptic_{datestr}.fits'\n\n\ndef download_start_of_day_map(dtime, wlen):\n \"\"\"\n Download the first map available on a given date, at a given wavelength.\n \"\"\"\n if dtime > datetime.now():\n raise RuntimeError(f'No map available for {dtime}')\n dtime = start_of_day(dtime)\n print(f'Fetching map for {dtime}')\n query = (a.Time(dtime, dtime + timedelta(days=1), dtime),\n a.Instrument('AIA'),\n a.Wavelength(wlen * u.Angstrom))\n result = Fido.search(*query)\n try:\n download_path = Fido.fetch(result[0, 0])[0]\n except IndexError as e:\n raise RuntimeError(f'No map available for {dtime}')\n download_path = pathlib.Path(download_path)\n shutil.move(download_path, map_path(dtime, wlen))\n\n\ndef load_start_of_day_map(dtime, wlen):\n \"\"\"\n Load the first map available on a given date, at a given wavelength.\n \"\"\"\n dtime = start_of_day(dtime)\n mappath = map_path(dtime, wlen)\n if not mappath.exists():\n download_start_of_day_map(dtime, wlen)\n\n print(f'Loading AIA {wlen} map for {dtime}')\n return Map(str(mappath))\n\n\ndef prep(m):\n \"\"\"\n Prep an AIA map.\n\n This runs (in order):\n - `aiapy.calibrate.update_pointing`\n - `aiapy.calibrate.fix_observer_location`\n - `aiapy.caibrate.correct_degradation`\n - `aiapy.calibrate.normalize_exposure`\n \"\"\"\n print('Prepping map')\n if m.exposure_time <= 0 * u.s:\n raise RuntimeError('Exposure time <= 0')\n m = update_pointing(m)\n m = fix_observer_location(m)\n m = correct_degradation(m, correction_table=correction_table)\n m = normalize_exposure(m)\n return m\n\n\ndef synop_header(shape_out, dtime):\n frame_out = SkyCoord(0, 0, unit=u.deg,\n frame=\"heliographic_carrington\",\n obstime=dtime,\n observer='earth')\n header = make_fitswcs_header(\n shape_out, frame_out,\n scale=[180 / shape_out[0],\n 360 / shape_out[1]] * u.deg / u.pix,\n projection_code=\"CAR\")\n return header\n\n\ndef helioproj_header(shape_out, dtime):\n sun_width_arscec = 2000\n frame_out = SkyCoord(0, 0, unit=u.deg,\n frame='helioprojective',\n obstime=dtime, observer='earth')\n header = make_fitswcs_header(\n shape_out, frame_out,\n scale=[sun_width_arscec / 
shape_out[0],\n sun_width_arscec / shape_out[1]] * u.arcsec / u.pix)\n return header\n\n\ndef synop_reproject(m, shape_out, wlen):\n synop_map_path = synoptic_map_path(m.date, wlen)\n if not synop_map_path.exists():\n print(f'Reprojecting AIA {wlen} map')\n m = prep(m)\n m.meta['rsun_ref'] = sunpy.sun.constants.radius.to_value(u.m)\n header = synop_header(shape_out, m.date)\n wcs = WCS(header)\n wcs.heliographic_observer = m.observer_coordinate\n with np.errstate(invalid='ignore'):\n array, footprint = reproject_interp(m, wcs, shape_out=shape_out)\n new_map = Map((array, header))\n new_map.save(str(synop_map_path))\n\n new_map = Map(synop_map_path)\n new_map.plot_settings = m.plot_settings\n return new_map\n\n\ndef long_weights(longs, l0):\n \"\"\"\n Weights to use when adding synoptic maps.\n\n Parameters\n ----------\n longs :\n The longitude coordinates of each pixel in a map.\n l0 :\n The observer longitude.\n \"\"\"\n dcenterlong = (longs - l0 + 180) % 360 - 180\n weights = np.exp(-(dcenterlong / 15)**2)\n weights[weights < 0] = 0\n return weights / np.nanmax(weights)\n\n\ndef create_synoptic_map(endtime, wlen):\n \"\"\"\n Create a synoptic map, using 27 daily SDO/AIA maps ending on the\n endtime given. Note that the maps are taken from the start of each day.\n\n Returns\n -------\n sunpy.map.Map : synoptic map\n \"\"\"\n shape = [720, 1440]\n data = np.zeros(shape)\n weight_sum = np.zeros(shape)\n nmaps = 27\n recent_time = None\n for i in range(nmaps):\n dtime = endtime - timedelta(days=i)\n try:\n aia_map = load_start_of_day_map(dtime, wlen)\n aia_synop_map = synop_reproject(aia_map, shape, wlen)\n except (RuntimeError, KeyError) as e:\n print('\\U0001F6A8 ' + str(e))\n continue\n\n if recent_time is None:\n recent_time = dtime.strftime('%Y-%m-%dT%H:%M:%S')\n\n # Create weights\n coord = sunpy.map.all_coordinates_from_map(aia_synop_map)\n longs = coord.lon.to(u.deg).value\n l0 = sunpy.coordinates.sun.L0(dtime).to(u.deg).value\n weights = long_weights(longs, l0)\n\n aia_data = aia_synop_map.data\n # Cast missing data to zero for now, to avoid adding NaNs to the total\n aia_data[~np.isfinite(aia_data)] = 0\n\n data += (aia_data * weights)\n weight_sum += weights\n\n data /= weight_sum\n data[data == 0] = np.nan\n\n meta = aia_synop_map.meta\n meta['date-obs'] = recent_time\n meta['instrume'] = 'AIA' # Set so sunpy recognises this as an AIA map\n meta['telescop'] = 'SDO'\n meta['wavelnth'] = str(wlen)\n meta['waveunit'] = 'Angstrom'\n\n synop_map = Map((data, meta))\n synop_map.plot_settings = aia_synop_map.plot_settings\n return synop_map\n\n\ndef aia_fov(dtime):\n l0 = sunpy.coordinates.sun.L0(dtime)\n bounds = Longitude([l0 - 90 * u.deg, l0 + 90 * u.deg])\n return bounds\n\n\nif __name__ == '__main__':\n map = create_synoptic_map(datetime(2018, 11, 10), 193)\n # Norm the data\n # data = map.data\n # data = map.plot_settings['norm'](data)\n\n # map = Map((data, map.meta))\n datestr = datetime.now().strftime('%Y%m%d')\n map.save(f'aia193_synoptic_latest_{datestr}.fits')\n"
] |
[
[
"numpy.nanmax",
"numpy.isfinite",
"numpy.errstate",
"numpy.exp",
"numpy.zeros"
]
] |
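The blending in `create_synoptic_map` above hinges on `long_weights`: each daily synoptic map is weighted by a Gaussian in wrapped longitude distance from the observer's central meridian L0, so each map contributes mostly near disk centre. A standalone numpy version of that weighting (dropping the original's no-op clamp of negative weights, since `exp` is always positive):

```python
# Gaussian longitude weighting from long_weights() above.
import numpy as np

def long_weights(longs, l0):
    dcenterlong = (longs - l0 + 180) % 360 - 180   # wrap to [-180, 180)
    weights = np.exp(-(dcenterlong / 15) ** 2)     # ~15 deg e-folding width
    return weights / np.nanmax(weights)

longs = np.arange(0, 360, 60.0)
print(long_weights(longs, l0=120.0))  # peaks at the pixel nearest L0
```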
qwerty-Bk/dla_hw1
|
[
"305c71a4bb55c192bd0b6cbdd7dbb11b4731ab22"
] |
[
"hw_asr/collate_fn/collate.py"
] |
[
"import logging\nimport numpy as np\nimport torch\nfrom typing import List\nfrom speechbrain.utils.data_utils import pad_right_to\n\nlogger = logging.getLogger(__name__)\n\n\ndef collate_fn(dataset_items: List[dict]):\n \"\"\"\n Collate and pad fields in dataset items\n \"\"\"\n\n result_batch = {}\n # TODO: your code here\n tensor_keys = ('audio', 'spectrogram', 'text_encoded')\n # for k, v in result_batch.items():\n # if k not in tensor_keys:\n # result_batch[k] = [v]\n\n max_tens = {k: 0 for k in tensor_keys}\n for d in dataset_items:\n for k in tensor_keys:\n # print(k, d[k].shape)\n if max_tens[k] == 0:\n max_tens[k] = list(d[k].shape)\n else:\n max_tens[k] = [max(x, y) for (x, y) in zip(max_tens[k], list(d[k].shape))]\n\n for i in range(len(dataset_items)):\n for k, v in dataset_items[i].items():\n if k in tensor_keys:\n value, prev_size = pad_right_to(v, max_tens[k])\n if i == 0:\n result_batch[k] = value\n result_batch[k + '_length'] = [v.shape[-1]]\n else:\n result_batch[k] = torch.cat((result_batch[k], value))\n result_batch[k + '_length'].append(v.shape[-1])\n else:\n if i == 0:\n result_batch[k] = []\n result_batch[k].append(v)\n for k in tensor_keys:\n result_batch[k + '_length'] = torch.from_numpy(np.array(result_batch[k + '_length']))\n result_batch['spectrogram'] = torch.transpose(result_batch['spectrogram'], 1, 2)\n return result_batch\n"
] |
[
[
"numpy.array",
"torch.transpose",
"torch.cat"
]
] |
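The collate function above relies on speechbrain's `pad_right_to` to pad every tensor field to the batch maximum while tracking original lengths. A self-contained sketch of the same idea for a single key, using only `torch.nn.functional.pad` (the `[1, T]` item shapes are an assumption matching how the audio tensors above are concatenated along dim 0):

```python
# Right-pad variable-length [1, T] tensors to the batch max and stack them,
# keeping the original lengths for downstream length-aware models.
import torch
import torch.nn.functional as F

items = [torch.randn(1, 5), torch.randn(1, 8), torch.randn(1, 3)]
max_t = max(t.shape[-1] for t in items)
batch = torch.cat([F.pad(t, (0, max_t - t.shape[-1])) for t in items])
lengths = torch.tensor([t.shape[-1] for t in items])
print(batch.shape, lengths)   # torch.Size([3, 8]) tensor([5, 8, 3])
```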
andy971022/acg-project
|
[
"e42b0f9010f5f9fc2c7eb4b9b306ae6321ddfd51"
] |
[
"taichi-experiment/sdf_renderer.py"
] |
[
"import math\nimport time\n\nimport numpy as np\n\nimport taichi as ti\n\nti.init(arch=ti.gpu)\nres = 1280, 720\ncolor_buffer = ti.Vector.field(3, dtype=ti.f32, shape=res)\nmax_ray_depth = 6\neps = 1e-4\ninf = 1e10\n\nfov = 0.23\ndist_limit = 100\n\ncamera_pos = ti.Vector([0.0, 0.32, 3.7])\nlight_pos = [-1.5, 0.6, 0.3]\nlight_normal = [1.0, 0.0, 0.0]\nlight_radius = 2.0\n\n\[email protected]\ndef intersect_light(pos, d):\n light_loc = ti.Vector(light_pos)\n dot = -d.dot(ti.Vector(light_normal))\n dist = d.dot(light_loc - pos)\n dist_to_light = inf\n if dot > 0 and dist > 0:\n D = dist / dot\n dist_to_center = (light_loc - (pos + D * d)).norm_sqr()\n if dist_to_center < light_radius**2:\n dist_to_light = D\n return dist_to_light\n\n\[email protected]\ndef out_dir(n):\n u = ti.Vector([1.0, 0.0, 0.0])\n if abs(n[1]) < 1 - eps:\n u = n.cross(ti.Vector([0.0, 1.0, 0.0])).normalized()\n v = n.cross(u)\n phi = 2 * math.pi * ti.random()\n ay = ti.sqrt(ti.random())\n ax = ti.sqrt(1 - ay**2)\n return ax * (ti.cos(phi) * u + ti.sin(phi) * v) + ay * n\n\n\[email protected]\ndef make_nested(f):\n f = f * 40\n i = int(f)\n if f < 0:\n if i % 2 == 1:\n f -= ti.floor(f)\n else:\n f = ti.floor(f) + 1 - f\n f = (f - 0.2) / 40\n return f\n\n\n# https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm\[email protected]\ndef sdf(o):\n wall = min(o[1] + 0.1, o[2] + 0.4)\n sphere = (o - ti.Vector([0.0, 0.35, 0.0])).norm() - 0.36\n\n q = ti.abs(o - ti.Vector([0.8, 0.3, 0])) - ti.Vector([0.3, 0.3, 0.3])\n box = ti.Vector([max(0, q[0]), max(0, q[1]),\n max(0, q[2])]).norm() + min(q.max(), 0)\n\n O = o - ti.Vector([-0.8, 0.3, 0])\n d = ti.Vector([ti.Vector([O[0], O[2]]).norm() - 0.3, abs(O[1]) - 0.3])\n cylinder = min(d.max(), 0.0) + ti.Vector([max(0, d[0]),\n max(0, d[1])]).norm()\n\n geometry = make_nested(min(sphere, box, cylinder))\n geometry = max(geometry, -(0.32 - (o[1] * 0.6 + o[2] * 0.8)))\n return min(wall, geometry)\n\n\[email protected]\ndef ray_march(p, d):\n j = 0\n dist = 0.0\n while j < 100 and sdf(p + dist * d) > 1e-6 and dist < inf:\n dist += sdf(p + dist * d)\n j += 1\n return min(inf, dist)\n\n\[email protected]\ndef sdf_normal(p):\n d = 1e-3\n n = ti.Vector([0.0, 0.0, 0.0])\n sdf_center = sdf(p)\n for i in ti.static(range(3)):\n inc = p\n inc[i] += d\n n[i] = (1 / d) * (sdf(inc) - sdf_center)\n return n.normalized()\n\n\[email protected]\ndef next_hit(pos, d):\n closest, normal, c = inf, ti.Vector.zero(ti.f32,\n 3), ti.Vector.zero(ti.f32, 3)\n ray_march_dist = ray_march(pos, d)\n if ray_march_dist < dist_limit and ray_march_dist < closest:\n closest = ray_march_dist\n normal = sdf_normal(pos + d * closest)\n hit_pos = pos + d * closest\n t = int((hit_pos[0] + 10) * 1.1 + 0.5) % 3\n c = ti.Vector(\n [0.4 + 0.3 * (t == 0), 0.4 + 0.2 * (t == 1), 0.4 + 0.3 * (t == 2)])\n return closest, normal, c\n\n\[email protected]\ndef render():\n for u, v in color_buffer:\n aspect_ratio = res[0] / res[1]\n pos = camera_pos\n d = ti.Vector([\n (2 * fov * (u + ti.random()) / res[1] - fov * aspect_ratio - 1e-5),\n 2 * fov * (v + ti.random()) / res[1] - fov - 1e-5, -1.0\n ])\n d = d.normalized()\n\n throughput = ti.Vector([1.0, 1.0, 1.0])\n\n depth = 0\n hit_light = 0.00\n\n while depth < max_ray_depth:\n closest, normal, c = next_hit(pos, d)\n depth += 1\n dist_to_light = intersect_light(pos, d)\n if dist_to_light < closest:\n hit_light = 1\n depth = max_ray_depth\n else:\n hit_pos = pos + closest * d\n if normal.norm_sqr() != 0:\n d = out_dir(normal)\n pos = hit_pos + 1e-4 * d\n throughput *= c\n 
else:\n depth = max_ray_depth\n color_buffer[u, v] += throughput * hit_light\n\n\ngui = ti.GUI('SDF Path Tracer', res)\nlast_t = 0\nfor i in range(50000):\n render()\n interval = 10\n if i % interval == 0 and i > 0:\n print(\"{:.2f} samples/s\".format(interval / (time.time() - last_t)))\n last_t = time.time()\n img = color_buffer.to_numpy() * (1 / (i + 1))\n img = img / img.mean() * 0.24\n gui.set_image(np.sqrt(img))\n gui.show()"
] |
[
[
"numpy.sqrt"
]
] |
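The `sdf()` kernel in the record above uses the standard Inigo Quilez box distance (per the article it links): with `q = |p - centre| - half_extents`, the distance is `|max(q, 0)| + min(max(q), 0)`. A plain-numpy sketch of just that box term, outside Taichi:

```python
# Signed distance to an axis-aligned box, as used inside sdf() above.
import numpy as np

def sd_box(p, centre, half_extents):
    q = np.abs(np.asarray(p, float) - centre) - half_extents
    # norm of the positive part gives the outside distance;
    # min(max-component, 0) gives the (negative) inside distance
    return np.linalg.norm(np.maximum(q, 0.0)) + min(q.max(), 0.0)

print(sd_box([0.8, 0.9, 0.0], centre=[0.8, 0.3, 0.0], half_extents=[0.3, 0.3, 0.3]))
# 0.3: the point sits 0.3 above the top face of the box centred at (0.8, 0.3, 0)
```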
thoth291/holoviews
|
[
"98b657262e3c9322ad7848b950053d171d661c35"
] |
[
"tests/operation/testdatashader.py"
] |
[
"from unittest import SkipTest\nfrom nose.plugins.attrib import attr\n\nimport numpy as np\nfrom holoviews import (Dimension, Curve, Points, Image, Dataset, RGB, Path,\n Graph, TriMesh, QuadMesh, NdOverlay, Contours)\nfrom holoviews.element.comparison import ComparisonTestCase\nfrom holoviews.core.util import pd\n\ntry:\n import datashader as ds\n from holoviews.operation.datashader import (\n aggregate, regrid, ds_version, stack, directly_connect_edges,\n shade, rasterize\n )\nexcept:\n ds_version = None\n\n\n@attr(optional=1)\nclass DatashaderAggregateTests(ComparisonTestCase):\n \"\"\"\n Tests for datashader aggregation\n \"\"\"\n\n def test_aggregate_points(self):\n points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])\n img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),\n vdims=['Count'])\n self.assertEqual(img, expected)\n\n def test_aggregate_points_target(self):\n points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),\n vdims=['Count'])\n img = aggregate(points, dynamic=False, target=expected)\n self.assertEqual(img, expected)\n\n def test_aggregate_points_sampling(self):\n points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),\n vdims=['Count'])\n img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n x_sampling=0.5, y_sampling=0.5)\n self.assertEqual(img, expected)\n\n def test_aggregate_points_categorical(self):\n points = Points([(0.2, 0.3, 'A'), (0.4, 0.7, 'B'), (0, 0.99, 'C')], vdims='z')\n img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2, aggregator=ds.count_cat('z'))\n xs, ys = [0.25, 0.75], [0.25, 0.75]\n expected = NdOverlay({'A': Image((xs, ys, [[1, 0], [0, 0]]), vdims='z Count'),\n 'B': Image((xs, ys, [[0, 0], [1, 0]]), vdims='z Count'),\n 'C': Image((xs, ys, [[0, 0], [1, 0]]), vdims='z Count')},\n kdims=['z'])\n self.assertEqual(img, expected)\n\n def test_aggregate_curve(self):\n curve = Curve([(0.2, 0.3), (0.4, 0.7), (0.8, 0.99)])\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [1, 1]]),\n vdims=['Count'])\n img = aggregate(curve, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n self.assertEqual(img, expected)\n\n def test_aggregate_curve_datetimes(self):\n dates = pd.date_range(start=\"2016-01-01\", end=\"2016-01-03\", freq='1D')\n curve = Curve((dates, [1, 2, 3]))\n img = aggregate(curve, width=2, height=2, dynamic=False)\n bounds = (np.datetime64('2016-01-01T00:00:00.000000'), 1.0,\n np.datetime64('2016-01-03T00:00:00.000000'), 3.0)\n dates = [np.datetime64('2016-01-01T12:00:00.000000000'),\n np.datetime64('2016-01-02T12:00:00.000000000')]\n expected = Image((dates, [1.5, 2.5], [[1, 0], [0, 2]]),\n datatype=['xarray'], bounds=bounds, vdims='Count')\n self.assertEqual(img, expected)\n\n def test_aggregate_curve_datetimes_microsecond_timebase(self):\n dates = pd.date_range(start=\"2016-01-01\", end=\"2016-01-03\", freq='1D')\n xstart = np.datetime64('2015-12-31T23:59:59.723518000', 'us')\n xend = np.datetime64('2016-01-03T00:00:00.276482000', 'us')\n curve = Curve((dates, [1, 2, 3]))\n img = aggregate(curve, width=2, height=2, x_range=(xstart, xend), dynamic=False)\n bounds = (np.datetime64('2015-12-31T23:59:59.723518'), 1.0,\n np.datetime64('2016-01-03T00:00:00.276482'), 3.0)\n dates = [np.datetime64('2016-01-01T11:59:59.861759000',),\n 
np.datetime64('2016-01-02T12:00:00.138241000')]\n expected = Image((dates, [1.5, 2.5], [[1, 0], [0, 2]]),\n datatype=['xarray'], bounds=bounds, vdims='Count')\n self.assertEqual(img, expected)\n\n def test_aggregate_ndoverlay(self):\n ds = Dataset([(0.2, 0.3, 0), (0.4, 0.7, 1), (0, 0.99, 2)], kdims=['x', 'y', 'z'])\n ndoverlay = ds.to(Points, ['x', 'y'], [], 'z').overlay()\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),\n vdims=['Count'])\n img = aggregate(ndoverlay, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n self.assertEqual(img, expected)\n\n def test_aggregate_path(self):\n path = Path([[(0.2, 0.3), (0.4, 0.7)], [(0.4, 0.7), (0.8, 0.99)]])\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 1]]),\n vdims=['Count'])\n img = aggregate(path, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n self.assertEqual(img, expected)\n\n def test_aggregate_contours_with_vdim(self):\n contours = Contours([[(0.2, 0.3, 1), (0.4, 0.7, 1)], [(0.4, 0.7, 2), (0.8, 0.99, 2)]], vdims='z')\n img = rasterize(contours, dynamic=False)\n self.assertEqual(img.vdims, ['z'])\n\n def test_aggregate_contours_without_vdim(self):\n contours = Contours([[(0.2, 0.3), (0.4, 0.7)], [(0.4, 0.7), (0.8, 0.99)]])\n img = rasterize(contours, dynamic=False)\n self.assertEqual(img.vdims, ['Count'])\n\n def test_aggregate_dframe_nan_path(self):\n path = Path([Path([[(0.2, 0.3), (0.4, 0.7)], [(0.4, 0.7), (0.8, 0.99)]]).dframe()])\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 1]]),\n vdims=['Count'])\n img = aggregate(path, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n self.assertEqual(img, expected)\n\n\n@attr(optional=1)\nclass DatashaderShadeTests(ComparisonTestCase):\n\n def test_shade_categorical_images_xarray(self):\n xs, ys = [0.25, 0.75], [0.25, 0.75]\n data = NdOverlay({'A': Image((xs, ys, [[1, 0], [0, 0]]), datatype=['xarray'], vdims='z Count'),\n 'B': Image((xs, ys, [[0, 0], [1, 0]]), datatype=['xarray'], vdims='z Count'),\n 'C': Image((xs, ys, [[0, 0], [1, 0]]), datatype=['xarray'], vdims='z Count')},\n kdims=['z'])\n shaded = shade(data)\n r = [[228, 255], [66, 255]]\n g = [[26, 255], [150, 255]]\n b = [[28, 255], [129, 255]]\n a = [[40, 0], [255, 0]]\n expected = RGB((xs, ys, r, g, b, a), datatype=['grid'],\n vdims=RGB.vdims+[Dimension('A', range=(0, 1))])\n self.assertEqual(shaded, expected)\n\n def test_shade_categorical_images_grid(self):\n xs, ys = [0.25, 0.75], [0.25, 0.75]\n data = NdOverlay({'A': Image((xs, ys, [[1, 0], [0, 0]]), datatype=['grid'], vdims='z Count'),\n 'B': Image((xs, ys, [[0, 0], [1, 0]]), datatype=['grid'], vdims='z Count'),\n 'C': Image((xs, ys, [[0, 0], [1, 0]]), datatype=['grid'], vdims='z Count')},\n kdims=['z'])\n shaded = shade(data)\n r = [[228, 255], [66, 255]]\n g = [[26, 255], [150, 255]]\n b = [[28, 255], [129, 255]]\n a = [[40, 0], [255, 0]]\n expected = RGB((xs, ys, r, g, b, a), datatype=['grid'],\n vdims=RGB.vdims+[Dimension('A', range=(0, 1))])\n self.assertEqual(shaded, expected)\n\n\n\n@attr(optional=1)\nclass DatashaderRegridTests(ComparisonTestCase):\n \"\"\"\n Tests for datashader aggregation\n \"\"\"\n\n def setUp(self):\n if ds_version is None or ds_version <= '0.5.0':\n raise SkipTest('Regridding operations require datashader>=0.6.0')\n\n def test_regrid_mean(self):\n img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T))\n regridded = regrid(img, width=2, height=2, dynamic=False)\n expected = Image(([2., 7.], [0.75, 3.25], [[1, 5], [6, 
22]]))\n self.assertEqual(regridded, expected)\n\n def test_regrid_mean_xarray_transposed(self):\n img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T),\n datatype=['xarray'])\n img.data = img.data.transpose()\n regridded = regrid(img, width=2, height=2, dynamic=False)\n expected = Image(([2., 7.], [0.75, 3.25], [[1, 5], [6, 22]]))\n self.assertEqual(regridded, expected)\n\n def test_regrid_rgb_mean(self):\n arr = (np.arange(10) * np.arange(5)[np.newaxis].T).astype('f')\n rgb = RGB((range(10), range(5), arr, arr*2, arr*2))\n regridded = regrid(rgb, width=2, height=2, dynamic=False)\n new_arr = np.array([[1.6, 5.6], [6.4, 22.4]])\n expected = RGB(([2., 7.], [0.75, 3.25], new_arr, new_arr*2, new_arr*2), datatype=['xarray'])\n self.assertEqual(regridded, expected)\n\n def test_regrid_max(self):\n img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T))\n regridded = regrid(img, aggregator='max', width=2, height=2, dynamic=False)\n expected = Image(([2., 7.], [0.75, 3.25], [[8, 18], [16, 36]]))\n self.assertEqual(regridded, expected)\n\n def test_regrid_upsampling(self):\n img = Image(([0.5, 1.5], [0.5, 1.5], [[0, 1], [2, 3]]))\n regridded = regrid(img, width=4, height=4, upsample=True, dynamic=False)\n expected = Image(([0.25, 0.75, 1.25, 1.75], [0.25, 0.75, 1.25, 1.75],\n [[0, 0, 1, 1],\n [0, 0, 1, 1],\n [2, 2, 3, 3],\n [2, 2, 3, 3]]))\n self.assertEqual(regridded, expected)\n\n def test_regrid_upsampling_linear(self):\n ### This test causes a numba error using 0.35.0 - temporarily disabled ###\n return\n img = Image(([0.5, 1.5], [0.5, 1.5], [[0, 1], [2, 3]]))\n regridded = regrid(img, width=4, height=4, upsample=True, interpolation='linear', dynamic=False)\n expected = Image(([0.25, 0.75, 1.25, 1.75], [0.25, 0.75, 1.25, 1.75],\n [[0, 0, 0, 1],\n [0, 1, 1, 1],\n [1, 1, 2, 2],\n [2, 2, 2, 3]]))\n self.assertEqual(regridded, expected)\n\n def test_regrid_disabled_upsampling(self):\n img = Image(([0.5, 1.5], [0.5, 1.5], [[0, 1], [2, 3]]))\n regridded = regrid(img, width=3, height=3, dynamic=False, upsample=False)\n self.assertEqual(regridded, img)\n\n def test_regrid_disabled_expand(self):\n img = Image(([0.5, 1.5], [0.5, 1.5], [[0., 1.], [2., 3.]]))\n regridded = regrid(img, width=2, height=2, x_range=(-2, 4), y_range=(-2, 4), expand=False,\n dynamic=False)\n self.assertEqual(regridded, img)\n\n\n@attr(optional=1)\nclass DatashaderRasterizeTests(ComparisonTestCase):\n \"\"\"\n Tests for datashader aggregation\n \"\"\"\n\n def setUp(self):\n if ds_version is None or ds_version <= '0.6.4':\n raise SkipTest('Regridding operations require datashader>=0.7.0')\n\n def test_rasterize_trimesh_no_vdims(self):\n simplices = [(0, 1, 2), (3, 2, 1)]\n vertices = [(0., 0.), (0., 1.), (1., 0), (1, 1)]\n trimesh = TriMesh((simplices, vertices))\n img = rasterize(trimesh, width=3, height=3, dynamic=False)\n image = Image(np.array([[2, 1, 2], [1, 2, 1], [2, 1, 2]]),\n bounds=(0, 0, 1, 1), vdims='Count')\n self.assertEqual(img, image)\n\n def test_rasterize_trimesh(self):\n simplices = [(0, 1, 2, 0.5), (3, 2, 1, 1.5)]\n vertices = [(0., 0.), (0., 1.), (1., 0), (1, 1)]\n trimesh = TriMesh((simplices, vertices), vdims=['z'])\n img = rasterize(trimesh, width=3, height=3, dynamic=False)\n image = Image(np.array([[1.5, 1.5, np.NaN], [0.5, 1.5, np.NaN], [np.NaN, np.NaN, np.NaN]]),\n bounds=(0, 0, 1, 1))\n self.assertEqual(img, image)\n\n def test_rasterize_trimesh_vertex_vdims(self):\n simplices = [(0, 1, 2), (3, 2, 1)]\n vertices = [(0., 0., 1), (0., 1., 2), (1., 0., 3), 
(1., 1., 4)]\n trimesh = TriMesh((simplices, Points(vertices, vdims='z')))\n img = rasterize(trimesh, width=3, height=3, dynamic=False)\n image = Image(np.array([[2., 3., np.NaN], [1.5, 2.5, np.NaN], [np.NaN, np.NaN, np.NaN]]),\n bounds=(0, 0, 1, 1), vdims='z')\n self.assertEqual(img, image)\n\n def test_rasterize_trimesh_ds_aggregator(self):\n simplices = [(0, 1, 2, 0.5), (3, 2, 1, 1.5)]\n vertices = [(0., 0.), (0., 1.), (1., 0), (1, 1)]\n trimesh = TriMesh((simplices, vertices), vdims=['z'])\n img = rasterize(trimesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z'))\n image = Image(np.array([[1.5, 1.5, np.NaN], [0.5, 1.5, np.NaN], [np.NaN, np.NaN, np.NaN]]),\n bounds=(0, 0, 1, 1))\n self.assertEqual(img, image)\n\n def test_rasterize_trimesh_string_aggregator(self):\n simplices = [(0, 1, 2, 0.5), (3, 2, 1, 1.5)]\n vertices = [(0., 0.), (0., 1.), (1., 0), (1, 1)]\n trimesh = TriMesh((simplices, vertices), vdims=['z'])\n img = rasterize(trimesh, width=3, height=3, dynamic=False, aggregator='mean')\n image = Image(np.array([[1.5, 1.5, np.NaN], [0.5, 1.5, np.NaN], [np.NaN, np.NaN, np.NaN]]),\n bounds=(0, 0, 1, 1))\n self.assertEqual(img, image)\n\n def test_rasterize_quadmesh(self):\n qmesh = QuadMesh(([0, 1], [0, 1], np.array([[0, 1], [2, 3]])))\n img = rasterize(qmesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z'))\n image = Image(np.array([[2., 3., np.NaN], [0, 1, np.NaN], [np.NaN, np.NaN, np.NaN]]),\n bounds=(-.5, -.5, 1.5, 1.5))\n self.assertEqual(img, image)\n\n def test_rasterize_quadmesh_string_aggregator(self):\n qmesh = QuadMesh(([0, 1], [0, 1], np.array([[0, 1], [2, 3]])))\n img = rasterize(qmesh, width=3, height=3, dynamic=False, aggregator='mean')\n image = Image(np.array([[2., 3., np.NaN], [0, 1, np.NaN], [np.NaN, np.NaN, np.NaN]]),\n bounds=(-.5, -.5, 1.5, 1.5))\n self.assertEqual(img, image)\n\n def test_rasterize_points(self):\n points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])\n img = rasterize(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),\n vdims=['Count'])\n self.assertEqual(img, expected)\n\n def test_rasterize_curve(self):\n curve = Curve([(0.2, 0.3), (0.4, 0.7), (0.8, 0.99)])\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [1, 1]]),\n vdims=['Count'])\n img = rasterize(curve, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n self.assertEqual(img, expected)\n\n def test_rasterize_ndoverlay(self):\n ds = Dataset([(0.2, 0.3, 0), (0.4, 0.7, 1), (0, 0.99, 2)], kdims=['x', 'y', 'z'])\n ndoverlay = ds.to(Points, ['x', 'y'], [], 'z').overlay()\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),\n vdims=['Count'])\n img = rasterize(ndoverlay, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n self.assertEqual(img, expected)\n\n def test_rasterize_path(self):\n path = Path([[(0.2, 0.3), (0.4, 0.7)], [(0.4, 0.7), (0.8, 0.99)]])\n expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 1]]),\n vdims=['Count'])\n img = rasterize(path, dynamic=False, x_range=(0, 1), y_range=(0, 1),\n width=2, height=2)\n self.assertEqual(img, expected)\n\n def test_rasterize_image(self):\n img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T))\n regridded = regrid(img, width=2, height=2, dynamic=False)\n expected = Image(([2., 7.], [0.75, 3.25], [[1, 5], [6, 22]]))\n self.assertEqual(regridded, expected)\n\n def test_rasterize_image_string_aggregator(self):\n img = Image((range(10), 
range(5), np.arange(10) * np.arange(5)[np.newaxis].T))\n regridded = regrid(img, width=2, height=2, dynamic=False, aggregator='mean')\n expected = Image(([2., 7.], [0.75, 3.25], [[1, 5], [6, 22]]))\n self.assertEqual(regridded, expected)\n\n\n\n@attr(optional=1)\nclass DatashaderStackTests(ComparisonTestCase):\n\n def setUp(self):\n self.rgb1_arr = np.array([[[0, 1], [1, 0]],\n [[1, 0], [0, 1]],\n [[0, 0], [0, 0]]], dtype=np.uint8).T*255\n self.rgb2_arr = np.array([[[0, 0], [0, 0]],\n [[0, 0], [0, 0]],\n [[1, 0], [0, 1]]], dtype=np.uint8).T*255\n self.rgb1 = RGB(self.rgb1_arr)\n self.rgb2 = RGB(self.rgb2_arr)\n\n\n def test_stack_add_compositor(self):\n combined = stack(self.rgb1*self.rgb2, compositor='add')\n arr = np.array([[[0, 255, 255], [255,0, 0]], [[255, 0, 0], [0, 255, 255]]], dtype=np.uint8)\n expected = RGB(arr)\n self.assertEqual(combined, expected)\n\n def test_stack_over_compositor(self):\n combined = stack(self.rgb1*self.rgb2, compositor='over')\n self.assertEqual(combined, self.rgb2)\n\n def test_stack_over_compositor_reverse(self):\n combined = stack(self.rgb2*self.rgb1, compositor='over')\n self.assertEqual(combined, self.rgb1)\n\n def test_stack_saturate_compositor(self):\n combined = stack(self.rgb1*self.rgb2, compositor='saturate')\n self.assertEqual(combined, self.rgb1)\n\n def test_stack_saturate_compositor_reverse(self):\n combined = stack(self.rgb2*self.rgb1, compositor='saturate')\n self.assertEqual(combined, self.rgb2)\n\n\n@attr(optional=1)\nclass GraphBundlingTests(ComparisonTestCase):\n\n def setUp(self):\n if ds_version is None or ds_version <= '0.7.0':\n raise SkipTest('Regridding operations require datashader>=0.7.0')\n self.source = np.arange(8)\n self.target = np.zeros(8)\n self.graph = Graph(((self.source, self.target),))\n\n def test_directly_connect_paths(self):\n direct = directly_connect_edges(self.graph)._split_edgepaths\n self.assertEqual(direct, self.graph.edgepaths)\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.zeros",
"numpy.datetime64"
]
] |
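The point-rasterization tests in the code above are self-contained enough to run outside the test class. A minimal sketch, assuming holoviews with datashader installed, mirroring the inputs and expected counts of test_rasterize_points:

    import holoviews as hv
    from holoviews.operation.datashader import rasterize

    points = hv.Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])
    # Aggregate the points onto a fixed 2x2 grid over the unit square
    img = rasterize(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
                    width=2, height=2)
    # The per-cell counts land in the 'Count' value dimension,
    # matching [[1, 0], [2, 0]] in the expected Image above
    print(img.dimension_values('Count', flat=False))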
u6k/investment-machine-predict-prices
|
[
"54b433719ad80179043797c2720ed9900d8bd20c",
"54b433719ad80179043797c2720ed9900d8bd20c"
] |
[
"investment_stocks_predict_trend/predict_3.py",
"investment_stocks_predict_trend/agent_2.py"
] |
[
"import argparse\n\nfrom sklearn import ensemble\nfrom predict_base import PredictClassificationBase\n\n\nclass PredictClassification_3(PredictClassificationBase):\n def model_fit(self, x_train, y_train):\n return ensemble.RandomForestClassifier(n_estimators=200).fit(x_train, y_train)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--task\", help=\"preprocess, or train\")\n parser.add_argument(\"--simulate-group\", help=\"simulate trade group\")\n parser.add_argument(\"--suffix\", help=\"folder name suffix (default: test)\", default=\"test\")\n args = parser.parse_args()\n\n pred = PredictClassification_3(\n train_start_date=\"2008-01-01\",\n train_end_date=\"2017-12-31\",\n test_start_date=\"2018-01-01\",\n test_end_date=\"2018-12-31\",\n s3_bucket=\"u6k\",\n input_preprocess_base_path=f\"ml-data/stocks/preprocess_2.{args.suffix}\",\n input_simulate_base_path=f\"ml-data/stocks/simulate_trade_{args.simulate_group}.{args.suffix}\",\n output_base_path=f\"ml-data/stocks/predict_3.simulate_trade_{args.simulate_group}.{args.suffix}\"\n )\n\n if args.task == \"preprocess\":\n pred.preprocess()\n elif args.task == \"train\":\n pred.train()\n else:\n parser.print_help()\n",
"import pandas as pd\nimport numpy as np\nimport chainer\nimport chainerrl\nimport matplotlib.pyplot as plt\n\n\ndef execute(experiment):\n df = preprocessing()\n env = build_env(df)\n agent = build_agent(env, experiment)\n learn_agent(env, agent, experiment)\n df_result = simulate_agent(env, agent, experiment)\n build_figure_win_vs_lose(df_result, experiment)\n build_figure_reward(df_result, experiment)\n\n\ndef preprocessing():\n df_csv = pd.read_csv(\"local/nikkei_averages.csv\")\n df_csv.info()\n print(df_csv.head())\n print(df_csv.tail())\n\n df = df_csv.copy()\n df = df[[\"date\", \"opening_price\", \"high_price\", \"low_price\", \"close_price\"]]\n df = df.sort_values(\"date\")\n df = df.drop_duplicates()\n df = df.assign(id=np.arange(len(df)))\n df = df.set_index(\"id\")\n\n df = df.assign(rolling_5_mean=df[\"close_price\"].rolling(window=5).mean())\n df.info()\n print(df.head())\n print(df.tail())\n\n return df\n\n\nclass LearnEnv():\n def __init__(self, df_input, start_id, end_id):\n self.DF = df_input.copy()\n self.START_ID = start_id\n self.END_ID = end_id\n\n self.reset()\n\n self.data_len = self.END_ID - self.START_ID\n self.action_size = 2 # 0...何もしない、1...購入or売却\n self.observation_size = len(self.observe())\n\n def reset(self):\n self.total_reward = 0.0\n self.funds = 0.0\n self.current_id = self.START_ID\n self.buy_price = 0.0\n self.done = False\n self.win = 0\n self.lose = 0\n\n self.df_action = self.DF.copy()\n self.df_action = self.df_action.assign(reward=0.)\n self.df_action = self.df_action.assign(funds=0.)\n self.df_action = self.df_action.assign(buy=0)\n self.df_action = self.df_action.assign(sell=0)\n self.df_action = self.df_action.assign(win=0)\n self.df_action = self.df_action.assign(lose=0)\n\n return self.observe()\n\n def step(self, action):\n if action == 0:\n reward = 0.0\n elif self.buy_price == 0.0:\n # buy\n self.buy_price = self.df_action.at[self.current_id, \"opening_price\"]\n self.funds -= self.buy_price\n reward = 0.0\n\n self.df_action.at[self.current_id, \"buy\"] = 1\n elif self.buy_price != 0.0:\n # sell\n sell_price = self.df_action.at[self.current_id, \"close_price\"]\n self.funds += sell_price\n reward = sell_price - self.buy_price\n self.total_reward += reward\n self.buy_price = 0.0\n\n if reward > 0:\n self.win += 1\n else:\n self.lose += 1\n\n self.df_action.at[self.current_id, \"sell\"] = 1\n\n self.df_action.at[self.current_id, \"reward\"] = self.total_reward\n self.df_action.at[self.current_id, \"funds\"] = self.funds\n self.df_action.at[self.current_id, \"win\"] = self.win\n self.df_action.at[self.current_id, \"lose\"] = self.lose\n\n self.current_id += 1\n if self.current_id >= self.END_ID:\n self.done = True\n\n return self.observe(), reward, self.done, {}\n\n def render(self):\n print(self.df_action.loc[self.current_id-1])\n\n def observe(self):\n obs = np.array(\n [self.df_action.at[self.current_id - i, \"rolling_5_mean\"] for i in range(1, 11)],\n dtype=np.float32\n )\n\n return obs\n\n def random_action(self):\n return np.random.randint(0, 2)\n\n\ndef build_env(df):\n env = LearnEnv(df, 19090-250, 19090)\n\n return env\n\n\ndef build_agent(env, experiment=None):\n hyper_params = {\n \"n_hidden_layers\": 3,\n \"obs_size\": env.observation_size,\n \"n_actions\": env.action_size,\n \"n_hidden_channels\": env.observation_size * env.action_size,\n \"adam_eps\": 1e-2,\n \"gamma\": 0.95,\n \"start_epsilon\": 1.0,\n \"end_epsilon\": 0.3,\n \"decay_steps\": 200 * 200,\n \"replay_buffer_capacity\": 10 ** 6,\n \"ddqn_replay_start_size\": 
500,\n \"ddqn_update_interval\": 1,\n \"ddqn_target_update_interval\": 100\n }\n if experiment is not None:\n experiment.log_parameters(hyper_params)\n\n q_func = chainerrl.q_functions.FCStateQFunctionWithDiscreteAction(\n hyper_params[\"obs_size\"],\n hyper_params[\"n_actions\"],\n n_hidden_layers=hyper_params[\"n_hidden_layers\"],\n n_hidden_channels=hyper_params[\"n_hidden_channels\"]\n )\n # q_func.to_gpu(0)\n\n optimizer = chainer.optimizers.Adam(eps=hyper_params[\"adam_eps\"])\n optimizer.setup(q_func)\n\n explorer = chainerrl.explorers.LinearDecayEpsilonGreedy(\n start_epsilon=hyper_params[\"start_epsilon\"],\n end_epsilon=hyper_params[\"end_epsilon\"],\n decay_steps=hyper_params[\"decay_steps\"],\n random_action_func=env.random_action\n )\n\n replay_buffer = chainerrl.replay_buffer.ReplayBuffer(capacity=hyper_params[\"replay_buffer_capacity\"])\n\n agent = chainerrl.agents.DoubleDQN(\n q_func,\n optimizer,\n replay_buffer,\n hyper_params[\"gamma\"],\n explorer,\n replay_start_size=hyper_params[\"ddqn_replay_start_size\"],\n update_interval=hyper_params[\"ddqn_update_interval\"],\n target_update_interval=hyper_params[\"ddqn_target_update_interval\"]\n )\n\n return agent\n\n\ndef learn_agent(env, agent, experiment=None):\n n_episodes = 500\n\n for i in range(1, n_episodes + 1):\n obs = env.reset()\n reward = 0\n done = False\n R = 0\n\n while not done:\n action = agent.act_and_train(obs, reward)\n obs, reward, done, _ = env.step(action)\n R += reward\n\n agent.stop_episode_and_train(obs, reward, done)\n\n metrics = {\n \"reward\": R,\n \"epsilon\": agent.explorer.epsilon,\n \"win\": env.win,\n \"lose\": env.lose,\n \"funds\": env.funds + env.buy_price\n }\n if experiment is not None:\n experiment.log_metrics(metrics, step=i)\n\n if i % 10 == 0:\n print(\"episode:\", i, \", R:\", R, \", statistics:\", agent.get_statistics(), \", epsilon:\", agent.explorer.epsilon)\n env.render()\n\n\ndef simulate_agent(env, agent, experiment=None):\n obs = env.reset()\n done = False\n\n while not done:\n action = agent.act(obs)\n obs, reward, done, _ = env.step(action)\n\n env.render()\n\n agent.stop_episode()\n\n df_result = env.df_action.query(\"18840 <= id <= 19090\").copy()\n\n if experiment is not None:\n experiment.log_asset_data(df_result.to_csv(), file_name=\"result.csv\")\n\n return df_result\n\n\ndef build_figure_win_vs_lose(df_result, experiment=None):\n fig = plt.figure(figsize=(20, 5))\n subplot = fig.add_subplot(111)\n subplot.plot(df_result[\"win\"], label=\"win\")\n subplot.plot(df_result[\"lose\"], label=\"lose\")\n subplot.legend()\n\n plt.show()\n\n if experiment is not None:\n experiment.log_figure(figure_name=\"win_vs_lose\", figure=fig)\n\n\ndef build_figure_reward(df_result, experiment=None):\n fig = plt.figure(figsize=(20, 5))\n subplot = fig.add_subplot(222)\n subplot.plot(df_result[\"reward\"], label=\"reward\")\n subplot.legend()\n\n plt.show()\n\n if experiment is not None:\n experiment.log_figure(figure_name=\"reward\", figure=fig)\n"
] |
[
[
"sklearn.ensemble.RandomForestClassifier"
],
[
"pandas.read_csv",
"numpy.random.randint",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
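The sklearn.ensemble.RandomForestClassifier call recorded for predict_3.py is the standard fit-on-arrays pattern behind a thin wrapper class. A minimal sketch with synthetic stand-in data (the repo's real inputs come from its preprocessed S3 paths, not shown here):

    import numpy as np
    from sklearn import ensemble

    rng = np.random.default_rng(0)
    x_train = rng.normal(size=(100, 5))      # stand-in feature matrix
    y_train = rng.integers(0, 2, size=100)   # stand-in binary labels

    clf = ensemble.RandomForestClassifier(n_estimators=200).fit(x_train, y_train)
    print(clf.score(x_train, y_train))       # training accuracy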
kandluis/droidlet
|
[
"3851f0bdac7bc63100cfbcf1c206a94658790352"
] |
[
"craftassist/agent/dialogue_objects/schematic_helper.py"
] |
[
"\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\n\nimport random\nimport re\nimport numpy as np\nfrom typing import cast, List, Tuple, Union, Optional\n\n# TODO with subinterpret\nfrom base_agent.dialogue_objects import get_repeat_num\nimport block_data\nimport size_words\nfrom .block_helpers import get_block_type\nfrom base_agent.base_util import ErrorWithResponse\nfrom mc_util import Block, most_common_idm\n\nfrom word2number.w2n import word_to_num\nfrom word_maps import SPECIAL_SHAPE_FNS, SPECIAL_SHAPES_CANONICALIZE\n\n\ndef get_properties_from_triples(triples_list, p):\n return [x.get(\"obj_text\") for x in triples_list if p in x.values()]\n\n\ndef get_attrs_from_triples(triples, interpreter):\n numeric_keys = {\n \"has_thickness\": get_properties_from_triples(triples, \"has_thickness\"),\n \"has_radius\": get_properties_from_triples(triples, \"has_radius\"),\n \"has_depth\": get_properties_from_triples(triples, \"has_depth\"),\n \"has_width\": get_properties_from_triples(triples, \"has_width\"),\n \"has_height\": get_properties_from_triples(triples, \"has_height\"),\n \"has_length\": get_properties_from_triples(triples, \"has_length\"),\n \"has_slope\": get_properties_from_triples(triples, \"has_slope\"),\n \"has_distance\": get_properties_from_triples(triples, \"has_distance\"),\n \"has_base\": get_properties_from_triples(triples, \"has_base\"),\n }\n\n attrs = {key[4:]: word_to_num(val[0]) for key, val in numeric_keys.items() if any(val)}\n\n text_keys = {\n \"has_orientation\": get_properties_from_triples(triples, \"has_orientation\"),\n \"has_size\": get_properties_from_triples(triples, \"has_size\"),\n \"has_block_type\": get_properties_from_triples(triples, \"has_block_type\"),\n \"has_colour\": get_properties_from_triples(triples, \"has_colour\"),\n }\n\n if any(text_keys[\"has_orientation\"]):\n attrs[\"orient\"] = text_keys[\"has_orientation\"][0]\n\n if any(text_keys[\"has_size\"]):\n attrs[\"size\"] = interpret_size(interpreter, text_keys[\"has_size\"][0])\n\n if any(text_keys[\"has_block_type\"]):\n block_type = get_block_type(text_keys[\"has_block_type\"][0])\n attrs[\"bid\"] = block_type\n elif any(text_keys[\"has_colour\"]):\n c = block_data.COLOR_BID_MAP.get(text_keys[\"has_colour\"][0])\n if c is not None:\n attrs[\"bid\"] = random.choice(c)\n\n return attrs\n\n\n# FIXME merge with shape_schematic\n# FIXME we should be able to do fancy stuff here, like fill the x with (copies of) schematic y\ndef interpret_fill_schematic(\n interpreter, speaker, d, hole_locs, hole_idm\n) -> Tuple[List[Block], List[Tuple[str, str]]]:\n \"\"\"Return a tuple of 2 values:\n - the schematic blocks, list[(xyz, idm)]\n - a list of (pred, val) tags\n\n the \"hole\" input is a list of xyz coordinates giving a \"mold\" to be filled.\n \"\"\"\n\n filters_d = d.get(\"filters\", {})\n triples = filters_d.get(\"triples\", [])\n attrs = get_attrs_from_triples(triples, interpreter)\n\n h = attrs.get(\"height\") or attrs.get(\"depth\") or attrs.get(\"thickness\")\n bid = attrs.get(\"bid\") or hole_idm or (1, 0)\n origin = np.min(hole_locs, axis=0)\n ymin = origin[1]\n if h:\n blocks_list = [((x, y, z), bid) for (x, y, z) in hole_locs if y - ymin < h]\n else:\n blocks_list = [((x, y, z), bid) for (x, y, z) in hole_locs]\n tags = []\n for t in triples:\n key = t.get(\"pred_text\", \"\")\n if key.startswith(\"has_\"):\n val = t.get(\"obj_text\", \"\")\n stemmed_val = val\n if val:\n tags.append((key, stemmed_val))\n\n return blocks_list, tags\n\n\ndef interpret_shape_schematic(\n 
interpreter, speaker, d, shapename=None\n) -> Tuple[List[Block], List[Tuple[str, str]]]:\n \"\"\"Return a tuple of 2 values:\n - the schematic blocks, list[(xyz, idm)]\n - a list of (pred, val) tags\n\n warning: if multiple possibilities are given for the same tag, current\n heursitic just picks one. e.g. if the lf is \n \"triples\" : [{\"pred_text\": \"has_colour\", \"obj_text\": \"red\"}, \n {\"pred_text\": \"has_colour\", \"obj_text\": \"blue\"}]\n will currently just pick red. Same for other properties encoded in triples\n \"\"\"\n # FIXME this is not compositional, and does not properly use FILTERS\n filters_d = d.get(\"filters\", {})\n triples = filters_d.get(\"triples\", [{\"pred_text\": \"has_shape\", \"obj_text\": \"cube\"}])\n if shapename is not None:\n shape = shapename\n else:\n # For sentences like \"Stack\" and \"Place\" that have the shapename in dict\n shapes = get_properties_from_triples(triples, \"has_shape\")\n if any(shapes):\n # see warning above w.r.t. 0\n shape = shapes[0]\n\n attrs = get_attrs_from_triples(triples, interpreter)\n\n tags = []\n for t in triples:\n key = t.get(\"pred_text\", \"\")\n if key.startswith(\"has_\"):\n val = t.get(\"obj_text\", \"\")\n stemmed_val = val\n if val:\n tags.append((key, stemmed_val))\n\n return SPECIAL_SHAPE_FNS[shape](**attrs), tags\n\n\ndef interpret_size(interpreter, text) -> Union[int, List[int]]:\n \"\"\"Processes the has_size_ span value and returns int or list[int]\"\"\"\n nums = re.findall(\"[-0-9]+\", text)\n if len(nums) == 1:\n # handle \"3\", \"three\", etc.\n return word_to_num(nums[0])\n elif len(nums) > 1:\n # handle \"3 x 3\", \"four by five\", etc.\n return [word_to_num(n) for n in nums]\n else:\n # handle \"big\", \"really huge\", etc.\n if hasattr(interpreter.agent, \"size_str_to_int\"):\n return interpreter.agent.size_str_to_int(text)\n else:\n return size_words.size_str_to_int(text)\n\n\ndef interpret_named_schematic(\n interpreter, speaker, d\n) -> Tuple[List[Block], Optional[str], List[Tuple[str, str]]]:\n \"\"\"Return a tuple of 3 values:\n - the schematic blocks, list[(xyz, idm)]\n - a SchematicNode memid, or None\n - a list of (pred, val) tags\n\n warning: if multiple possibilities are given for the same tag, current\n heursitic just picks one. e.g. if the lf is \n \"triples\" : [{\"pred_text\": \"has_colour\", \"obj_text\": \"red\"}, \n {\"pred_text\": \"has_colour\", \"obj_text\": \"blue\"}]\n will currently just pick red. Same for other properties encoded in triples\n \"\"\"\n # FIXME! 
this is not compositional, and is not using full FILTERS handlers\n filters_d = d.get(\"filters\", {})\n triples = filters_d.get(\"triples\", [])\n names = get_properties_from_triples(triples, \"has_name\")\n if not any(names):\n raise ErrorWithResponse(\"I don't know what you want me to build.\")\n name = names[0]\n stemmed_name = name.strip(\"s\") # why aren't we using stemmer anymore?\n shapename = SPECIAL_SHAPES_CANONICALIZE.get(name) or SPECIAL_SHAPES_CANONICALIZE.get(\n stemmed_name\n )\n if shapename:\n shape_blocks, tags = interpret_shape_schematic(\n interpreter, speaker, d, shapename=shapename\n )\n return shape_blocks, None, tags\n\n schematic = interpreter.memory.get_schematic_by_name(name)\n if schematic is None:\n schematic = interpreter.memory.get_schematic_by_name(stemmed_name)\n if schematic is None:\n raise ErrorWithResponse(\"I don't know what you want me to build.\")\n tags = [(p, v) for (_, p, v) in interpreter.memory.get_triples(subj=schematic.memid)]\n blocks = schematic.blocks\n # TODO generalize to more general block properties\n # Longer term: remove and put a call to the modify model here\n colours = get_properties_from_triples(triples, \"has_colour\")\n if any(colours):\n colour = colours[0]\n old_idm = most_common_idm(blocks.values())\n c = block_data.COLOR_BID_MAP.get(colour)\n if c is not None:\n new_idm = random.choice(c)\n for l in blocks:\n if blocks[l] == old_idm:\n blocks[l] = new_idm\n return list(blocks.items()), schematic.memid, tags\n\n\ndef interpret_schematic(\n interpreter, speaker, d, repeat_dict=None\n) -> List[Tuple[List[Block], Optional[str], List[Tuple[str, str]]]]:\n \"\"\"Return a list of 3-tuples, each with values:\n - the schematic blocks, list[(xyz, idm)]\n - a SchematicNode memid, or None\n - a list of (pred, val) tags\n \"\"\"\n # hack, fixme in grammar/standardize. sometimes the repeat is a sibling of action\n if repeat_dict is not None:\n repeat = cast(int, get_repeat_num(repeat_dict))\n else:\n repeat = cast(int, get_repeat_num(d))\n assert type(repeat) == int, \"bad repeat={}\".format(repeat)\n\n # FIXME! this is not compositional, and is not using full FILTERS handlers\n filters_d = d.get(\"filters\", {})\n triples = filters_d.get(\"triples\", [{\"pred_text\": \"has_shape\", \"obj_text\": \"cube\"}])\n shapes = get_properties_from_triples(triples, \"has_shape\")\n if any(shapes):\n blocks, tags = interpret_shape_schematic(interpreter, speaker, d)\n return [(blocks, None, tags)] * repeat\n else:\n return [interpret_named_schematic(interpreter, speaker, d)] * repeat\n\n\ndef get_repeat_dir(d):\n if \"repeat\" in d:\n direction_name = d.get(\"repeat\", {}).get(\"repeat_dir\", \"FRONT\")\n elif \"schematic\" in d:\n direction_name = d[\"schematic\"].get(\"repeat\", {}).get(\"repeat_dir\", \"FRONT\")\n else:\n direction_name = None\n return direction_name\n"
] |
[
[
"numpy.min"
]
] |
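The single NumPy call recorded for schematic_helper.py is the axis-wise minimum that interpret_fill_schematic uses to find the origin of the hole "mold". The same step in isolation, with made-up coordinates:

    import numpy as np

    hole_locs = [(2, 5, 3), (1, 6, 4), (3, 5, 2)]  # xyz block coordinates
    origin = np.min(hole_locs, axis=0)             # per-axis minimum -> [1 5 2]
    ymin = origin[1]                               # used to cap the fill height
    print(origin, ymin)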
RadostW/stochastic
|
[
"1d437900e0314f18678353fd4794ecefb197761d"
] |
[
"examples/sample.py"
] |
[
"from pychastic.problems.kp_4_27 import KloedenPlaten4_27\nfrom pychastic.sde_solver import SDESolver\n\nproblem = KloedenPlaten4_27\nsolver = SDESolver()\nsolver.dt = 0.01\nsolution = solver.solve(problem)\n\nexact = problem.exact_solution(problem.x0, solution['time_values'], solution['wiener_values'])\n\nimport matplotlib.pyplot as plt\nplt.plot(solution['time_values'], solution['solution_values'], marker='x', label='simulated')\nplt.plot(solution['time_values'], exact, label='true')\nplt.legend()\nplt.savefig('sol.png')\nplt.close()\n\nplt.plot(solution['time_values'], solution['solution_values']-exact)\nplt.title('Error')\nplt.savefig('error.png')\nplt.close()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close"
]
] |
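sample.py uses the non-interactive plot -> savefig -> close matplotlib pattern twice, once for the solution overlay and once for the error curve. Stripped of the SDE solver, the same pattern with synthetic stand-in data:

    import numpy as np
    import matplotlib.pyplot as plt

    t = np.linspace(0, 1, 100)
    exact = np.sin(t)
    simulated = exact + 0.05 * np.random.randn(t.size)

    plt.plot(t, simulated, marker='x', label='simulated')
    plt.plot(t, exact, label='true')
    plt.legend()
    plt.savefig('sol.png')
    plt.close()                      # close so the next figure starts clean

    plt.plot(t, simulated - exact)
    plt.title('Error')
    plt.savefig('error.png')
    plt.close()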
proxodilka/ibis
|
[
"1943b6065d2ba89c35856b2d9b8399f15af11c01"
] |
[
"ibis/pandas/execution/tests/test_functions.py"
] |
[
"import decimal\nimport functools\nimport math\nimport operator\nfrom operator import methodcaller\n\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as tm # noqa: E402\nimport pytest\n\nimport ibis\nimport ibis.expr.datatypes as dt # noqa: E402\nfrom ibis.pandas.udf import udf\n\npytestmark = pytest.mark.pandas\n\n\[email protected](\n 'op',\n [\n # comparison\n operator.eq,\n operator.ne,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n ],\n)\ndef test_binary_operations(t, df, op):\n expr = op(t.plain_float64, t.plain_int64)\n result = expr.execute()\n expected = op(df.plain_float64, df.plain_int64)\n tm.assert_series_equal(result, expected)\n\n\[email protected]('op', [operator.and_, operator.or_, operator.xor])\ndef test_binary_boolean_operations(t, df, op):\n expr = op(t.plain_int64 == 1, t.plain_int64 == 2)\n result = expr.execute()\n expected = op(df.plain_int64 == 1, df.plain_int64 == 2)\n tm.assert_series_equal(result, expected)\n\n\ndef operate(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except decimal.InvalidOperation:\n return decimal.Decimal('NaN')\n\n return wrapper\n\n\[email protected](\n ('ibis_func', 'pandas_func'),\n [\n (methodcaller('round'), lambda x: np.int64(round(x))),\n (\n methodcaller('round', 2),\n lambda x: x.quantize(decimal.Decimal('.00')),\n ),\n (\n methodcaller('round', 0),\n lambda x: x.quantize(decimal.Decimal('0.')),\n ),\n (methodcaller('ceil'), lambda x: decimal.Decimal(math.ceil(x))),\n (methodcaller('floor'), lambda x: decimal.Decimal(math.floor(x))),\n (methodcaller('exp'), methodcaller('exp')),\n (\n methodcaller('sign'),\n lambda x: x if not x else decimal.Decimal(1).copy_sign(x),\n ),\n (methodcaller('sqrt'), operate(lambda x: x.sqrt())),\n (\n methodcaller('log', 2),\n operate(lambda x: x.ln() / decimal.Decimal(2).ln()),\n ),\n (methodcaller('ln'), operate(lambda x: x.ln())),\n (\n methodcaller('log2'),\n operate(lambda x: x.ln() / decimal.Decimal(2).ln()),\n ),\n (methodcaller('log10'), operate(lambda x: x.log10())),\n ],\n)\ndef test_math_functions_decimal(t, df, ibis_func, pandas_func):\n dtype = dt.Decimal(12, 3)\n result = ibis_func(t.float64_as_strings.cast(dtype)).execute()\n context = decimal.Context(prec=dtype.precision)\n expected = df.float64_as_strings.apply(\n lambda x: context.create_decimal(x).quantize(\n decimal.Decimal(\n '{}.{}'.format(\n '0' * (dtype.precision - dtype.scale), '0' * dtype.scale\n )\n )\n )\n ).apply(pandas_func)\n\n result[result.apply(math.isnan)] = -99999\n expected[expected.apply(math.isnan)] = -99999\n tm.assert_series_equal(result, expected)\n\n\ndef test_round_decimal_with_negative_places(t, df):\n type = dt.Decimal(12, 3)\n expr = t.float64_as_strings.cast(type).round(-1)\n result = expr.execute()\n expected = pd.Series(\n list(map(decimal.Decimal, ['1.0E+2', '2.3E+2', '-1.00E+3'])),\n name='float64_as_strings',\n )\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n ('ibis_func', 'pandas_func'),\n [\n (lambda x: x.clip(lower=0), lambda x: x.clip(lower=0)),\n (lambda x: x.clip(lower=0.0), lambda x: x.clip(lower=0.0)),\n (lambda x: x.clip(upper=0), lambda x: x.clip(upper=0)),\n (\n lambda x: x.clip(lower=x - 1, upper=x + 1),\n lambda x: x.clip(lower=x - 1, upper=x + 1),\n ),\n (\n lambda x: x.clip(lower=0, upper=1),\n lambda x: x.clip(lower=0, upper=1),\n ),\n (\n lambda x: x.clip(lower=0, upper=1.0),\n lambda x: x.clip(lower=0, upper=1.0),\n ),\n ],\n)\ndef test_clip(t, df, ibis_func, 
pandas_func):\n result = ibis_func(t.float64_with_zeros).execute()\n expected = pandas_func(df.float64_with_zeros)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n ('ibis_func', 'pandas_func'),\n [\n (\n lambda x: x.quantile([0.25, 0.75]),\n lambda x: list(x.quantile([0.25, 0.75])),\n )\n ],\n)\[email protected]('column', ['float64_with_zeros', 'int64_with_zeros'])\ndef test_quantile_list(t, df, ibis_func, pandas_func, column):\n expr = ibis_func(t[column])\n result = expr.execute()\n expected = pandas_func(df[column])\n assert result == expected\n\n\[email protected](\n ('ibis_func', 'pandas_func'),\n [\n (lambda x: x.quantile(0), lambda x: x.quantile(0)),\n (lambda x: x.quantile(1), lambda x: x.quantile(1)),\n (\n lambda x: x.quantile(0.5, interpolation='linear'),\n lambda x: x.quantile(0.5, interpolation='linear'),\n ),\n ],\n)\ndef test_quantile_scalar(t, df, ibis_func, pandas_func):\n result = ibis_func(t.float64_with_zeros).execute()\n expected = pandas_func(df.float64_with_zeros)\n\n result = ibis_func(t.int64_with_zeros).execute()\n expected = pandas_func(df.int64_with_zeros)\n assert result == expected\n\n\[email protected](\n ('ibis_func', 'exc'),\n [\n # no lower/upper specified\n (lambda x: x.clip(), ValueError),\n # out of range on quantile\n (lambda x: x.quantile(5.0), ValueError),\n # invalid interpolation arg\n (lambda x: x.quantile(0.5, interpolation='foo'), ValueError),\n ],\n)\ndef test_arraylike_functions_transform_errors(t, df, ibis_func, exc):\n with pytest.raises(exc):\n ibis_func(t.float64_with_zeros).execute()\n\n\ndef test_quantile_array_access(client, t, df):\n quantile = t.float64_with_zeros.quantile([0.25, 0.5])\n expr = quantile[0], quantile[1]\n result = tuple(map(client.execute, expr))\n expected = tuple(df.float64_with_zeros.quantile([0.25, 0.5]))\n assert result == expected\n\n\[email protected](\n (\n 'left',\n 'right',\n 'expected_value',\n 'expected_type',\n 'left_dtype',\n 'right_dtype',\n ),\n [\n (True, 1, True, bool, dt.boolean, dt.int64),\n (True, 1.0, True, bool, dt.boolean, dt.float64),\n (True, True, True, bool, dt.boolean, dt.boolean),\n (False, 0, False, bool, dt.boolean, dt.int64),\n (False, 0.0, False, bool, dt.boolean, dt.float64),\n (False, False, False, bool, dt.boolean, dt.boolean),\n (1, True, 1, int, dt.int64, dt.boolean),\n (1, 1.0, 1, int, dt.int64, dt.float64),\n (1, 1, 1, int, dt.int64, dt.int64),\n (0, False, 0, int, dt.int64, dt.boolean),\n (0, 0.0, 0, int, dt.int64, dt.float64),\n (0, 0, 0, int, dt.int64, dt.int64),\n (1.0, True, 1.0, float, dt.float64, dt.boolean),\n (1.0, 1, 1.0, float, dt.float64, dt.int64),\n (1.0, 1.0, 1.0, float, dt.float64, dt.float64),\n (0.0, False, 0.0, float, dt.float64, dt.boolean),\n (0.0, 0, 0.0, float, dt.float64, dt.int64),\n (0.0, 0.0, 0.0, float, dt.float64, dt.float64),\n ],\n)\ndef test_execute_with_same_hash_value_in_scope(\n left, right, expected_value, expected_type, left_dtype, right_dtype\n):\n @udf.elementwise([left_dtype, right_dtype], left_dtype)\n def my_func(x, y):\n return x\n\n expr = my_func(left, right)\n result = ibis.pandas.execute(expr)\n assert type(result) is expected_type\n assert result == expected_value\n\n\ndef test_ifelse_returning_bool():\n one = ibis.literal(1)\n two = ibis.literal(2)\n true = ibis.literal(True)\n false = ibis.literal(False)\n expr = ibis.ifelse(one + one == two, true, false)\n result = ibis.pandas.execute(expr)\n assert result is True\n\n\[email protected](\n ('dtype', 'value'),\n [\n pytest.param(dt.float64, 1, id='float_int'),\n 
pytest.param(dt.float64, True, id='float_bool'),\n pytest.param(dt.int64, 1.0, id='int_float'),\n pytest.param(dt.int64, True, id='int_bool'),\n pytest.param(dt.boolean, 1.0, id='bool_float'),\n pytest.param(dt.boolean, 1, id='bool_int'),\n ],\n)\ndef test_signature_does_not_match_input_type(dtype, value):\n @udf.elementwise([dtype], dtype)\n def func(x):\n return x\n\n expr = func(value)\n result = ibis.pandas.execute(expr)\n assert type(result) == type(value)\n assert result == value\n"
] |
[
[
"pandas.util.testing.assert_series_equal"
]
] |
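The one pandas API recorded here, pandas.util.testing.assert_series_equal, checks two Series for equality of values, index, dtype, and name; note that pandas.util.testing was deprecated in pandas 1.0 in favour of pandas.testing. A minimal usage sketch:

    import pandas as pd
    import pandas.util.testing as tm   # legacy path used above; pandas.testing from 1.0 on

    result = pd.Series([1.0, 2.0], name='x')
    expected = pd.Series([1.0, 2.0], name='x')
    tm.assert_series_equal(result, expected)   # silent on match, AssertionError otherwise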
rkalahasty/PyHealth
|
[
"1ee0859d8d39a7fc6f8df48ef8d2bf6c17dcf4a5",
"1ee0859d8d39a7fc6f8df48ef8d2bf6c17dcf4a5"
] |
[
"pyhealth/models/ecg/rcrnet.py",
"pyhealth/models/text/_loss.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# License: BSD 2 clause\n\nimport os\nimport torch\nimport torch.nn as nn\nimport pickle\nimport warnings\nimport torchvision.models as models\nfrom ._loss import callLoss\nfrom ._dlbase import BaseControler\nfrom pyhealth.data.data_reader.ecg import rcrnet_reader\nfrom collections import OrderedDict\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom torch.nn import LSTM\nfrom torch.autograd import Variable\nimport numpy as np\n\nwarnings.filterwarnings('ignore')\n\nclass _ResnetBlock(nn.Module):\n\n def __init__(self, n_in_channel, n_embed_channel):\n\n super(_ResnetBlock, self).__init__()\n \n self.convs = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv1d(n_in_channel, n_embed_channel, kernel_size=1, stride=2, bias=True)),\n ('norm1', nn.BatchNorm1d(n_embed_channel)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv1d(n_embed_channel, n_embed_channel, kernel_size=3, stride=1, padding=1, bias=True)),\n ('norm2', nn.BatchNorm1d(n_embed_channel)),\n ('relu3', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv1d(n_embed_channel, 4 * n_embed_channel, kernel_size=1, stride=1, bias=True)),\n ('norm3', nn.BatchNorm1d(4 * n_embed_channel))\n ]))\n self.shortcut = nn.Sequential(OrderedDict([\n ('shortcut', nn.Conv1d(n_in_channel, 4 * n_embed_channel, kernel_size=3, stride=2, padding=1, bias=True)),\n ('norm0', nn.BatchNorm1d(4 * n_embed_channel))\n ]))\n\n def forward(self, x):\n return F.relu(self.convs(x) + self.shortcut(x))\n\n\nclass callPredictor(nn.Module):\n\n def __init__(self, \n in_channel, \n n_visit,\n n_fc = 128,\n drop_rate=0, \n label_size=4\n ):\n\n super(callPredictor, self).__init__()\n self.features = nn.Sequential(OrderedDict([\n ('conv0', nn.Conv1d(in_channel, 32, kernel_size=5, stride=2, padding=0, bias=True)) ,\n ('norm0', nn.BatchNorm1d(32)),\n ('relu0', nn.ReLU(inplace=True)),\n ('pool0', nn.MaxPool1d(kernel_size=5, stride=2, padding=2)),\n ]))\n\n block1 = _ResnetBlock(32, 32)\n self.features.add_module('resnetblock_1', block1)\n\n block2 = _ResnetBlock(128, 64)\n self.features.add_module('resnetblock_2', block2)\n\n block3 = _ResnetBlock(256, 128)\n self.features.add_module('resnetblock_3', block3)\n\n block4 = _ResnetBlock(512, 256)\n self.features.add_module('resnetblock_4', block4)\n \n self.rnn = LSTM(1024, n_fc, bias = True, bidirectional = True)\n \n self.fc = nn.Linear(n_fc * 2, label_size)\n \n def forward(self, data):\n x = data['X']\n n_case, n_visit, n_channel, n_feat = x.shape\n feat_x = x.view(n_case*n_visit, n_channel, n_feat)\n conv_x = self.features(feat_x)\n conv_x = nn.AdaptiveAvgPool1d(1)(conv_x)\n conv_x = torch.flatten(conv_x, 1)\n conv_x = conv_x.view(n_case, n_visit, 1024)\n rnn_x, _ = self.rnn(conv_x)\n out_x = torch.sum(rnn_x * data['cur_M'].unsqueeze(-1), 1)\n hat_y = self.fc(out_x)\n return hat_y\n\nclass RCRNet(BaseControler):\n\n def __init__(self, \n expmodel_id = 'test.new', \n n_epoch = 100,\n n_batchsize = 5,\n fc_size = 128,\n learn_ratio = 1e-4,\n weight_decay = 1e-4,\n n_epoch_saved = 1,\n loss_name = 'L1LossSoftmax',\n aggregate = 'sum',\n optimizer_name = 'adam',\n use_gpu = False,\n gpu_ids = '0'\n ):\n \"\"\"\n RCR-net consists of a 33-layer stacked residual block [He et al., 2016], 1-layer recurrent block and 1-layer fully connected block. 
\n\n\n Parameters\n\n ----------\n exp_id : str, optional (default='init.test') \n name of current experiment\n \n n_epoch : int, optional (default = 100)\n number of epochs with the initial learning rate\n \n n_batchsize : int, optional (default = 5)\n batch size for model training\n\n fc_size : int, optional (default = 128)\n size of fc layer\n\n learn_ratio : float, optional (default = 1e-4)\n initial learning rate for adam\n \n weight_decay : float, optional (default = 1e-4)\n weight decay (L2 penalty)\n \n n_epoch_saved : int, optional (default = 1)\n frequency of saving checkpoints at the end of epochs\n\n loss_name : str, optional (default='SigmoidCELoss') \n Name or objective function.\n\n use_gpu : bool, optional (default=False) \n If yes, use GPU recources; else use CPU recources \n\n\t\t\t\tgpu_ids : str, optional (default='') \n\t\t\t\t\t\t\t\t\t\tIf yes, assign concrete used gpu ids such as '0,2,6'; else use '0' \n\n \"\"\"\n \n super(RCRNet, self).__init__(expmodel_id)\n self.n_batchsize = n_batchsize\n self.n_epoch = n_epoch\n self.learn_ratio = learn_ratio\n self.fc_size = fc_size\n self.weight_decay = weight_decay\n self.n_epoch_saved = n_epoch_saved\n self.loss_name = loss_name\n self.aggregate = aggregate\n self.optimizer_name = optimizer_name\n self.use_gpu = use_gpu\n self.gpu_ids = gpu_ids\n self._args_check()\n \n def _train_model(self, train_loader):\n \n \"\"\"\n Parameters\n\n ----------\n\n train_loader : dataloader of train data\n \n Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset.\n\n refer to torch.utils.data.dataloader\n\n \"\"\"\n\n loss_v = []\n self.predictor.train()\n for batch_idx, databatch in enumerate(train_loader):\n inputs = databatch['X']\n cur_M = databatch['cur_M']\n targets = databatch['Y']\n inputs = Variable(inputs).float().to(self.device)\n cur_M = Variable(cur_M).float().to(self.device)\n targets = Variable(targets).float().to(self.device)\n outputs = self.predictor({'X': inputs, 'cur_M': cur_M})\n loss = self.criterion({'hat_y': outputs, 'y': targets})\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n loss_v.append(loss.cpu().data.numpy())\n self.acc['train'].append(np.mean(np.array(loss_v)))\n\n def _valid_model(self, valid_loader):\n \"\"\"\n Parameters\n\n ----------\n\n valid_loader : dataloader of valid data\n \n Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset.\n\n refer to torch.utils.data.dataloader\n\n \"\"\"\n\n loss_v = []\n for batch_idx, databatch in enumerate(valid_loader):\n inputs = databatch['X']\n cur_M = databatch['cur_M']\n targets = databatch['Y']\n inputs = Variable(inputs).float().to(self.device)\n cur_M = Variable(cur_M).float().to(self.device)\n targets = Variable(targets).float().to(self.device)\n outputs = self.predictor({'X': inputs, 'cur_M': cur_M})\n loss = self.criterion({'hat_y': outputs, 'y': targets})\n loss_v.append(loss.cpu().data.numpy())\n self.acc['valid'].append(np.mean(np.array(loss_v)))\n\n def _test_model(self, test_loader):\n \"\"\"\n Parameters\n\n ----------\n\n test_loader : dataloader of test data\n \n Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset.\n\n refer to torch.utils.data.dataloader\n\n \"\"\"\n\n # switch to train mode\n self.predictor.eval()\n pre_v = []\n prob_v = []\n real_v = []\n for batch_idx, databatch in enumerate(test_loader):\n inputs = databatch['X']\n cur_M = databatch['cur_M']\n targets = 
databatch['Y']\n inputs = Variable(inputs).float().to(self.device)\n cur_M = Variable(cur_M).float().to(self.device)\n targets = Variable(targets).float().to(self.device)\n outputs = self.predictor({'X': inputs, 'cur_M': cur_M})\n\n if self.task_type in ['multiclass']:\n prob_h = F.softmax(outputs, dim = -1)\n else:\n prob_h = F.sigmoid(outputs)\n pre_v.append(outputs.cpu().detach().numpy())\n prob_v.append(prob_h.cpu().detach().numpy())\n real_v.append(targets.cpu().detach().numpy())\n pickle.dump(np.concatenate(pre_v, 0), open(os.path.join(self.result_dir, 'hat_ori_y.'+self._loaded_epoch),'wb'))\n pickle.dump(np.concatenate(prob_v, 0), open(os.path.join(self.result_dir, 'hat_y.'+self._loaded_epoch),'wb'))\n pickle.dump(np.concatenate(real_v, 0), open(os.path.join(self.result_dir, 'y.'+self._loaded_epoch),'wb'))\n\n def _build_model(self):\n \"\"\"\n \n Build the crucial components for model training \n \n \n \"\"\"\n if self.is_loadmodel is False: \n _config = {\n 'in_channel': self.n_channel,\n 'n_visit': self.n_visit,\n 'n_fc': self.fc_size,\n 'label_size': self.label_size\n }\n self.predictor = callPredictor(**_config).to(self.device)\n self._save_predictor_config(_config)\n \n if self.dataparallal:\n self.predictor= torch.nn.DataParallel(self.predictor)\n self.criterion = callLoss(task = self.task_type,\n loss_name = self.loss_name,\n aggregate = self.aggregate)\n self.optimizer = self._get_optimizer(self.optimizer_name)\n\n def _get_reader(self, data, dtype = 'train'):\n \"\"\"\n Parameters\n\n ----------\n\n data : {\n 'x':list[episode_file_path], \n 'y':list[label], \n 'l':list[seq_len], \n 'feat_n': n of feature space, \n 'label_n': n of label space\n }\n\n The input samples dict.\n \n dtype: str, (default='train')\n \n dtype in ['train','valid','test'], different type imapct whether use shuffle for data\n \n Return\n \n ----------\n \n data_loader : dataloader of input data dict\n \n Combines a dataset and a sampler, and provides single- or multi-process iterators over the dataset.\n\n refer to torch.utils.data.dataloader\n \n \"\"\"\n _dataset = rcrnet_reader.DatasetReader(data) \n self.n_channel = _dataset.n_channel\n self.n_visit = _dataset.max_n_visits\n _loader = torch.utils.data.DataLoader(_dataset,\n batch_size=self.n_batchsize,\n drop_last = True,\n shuffle=True if dtype == 'train' else False)\n return _loader\n\n\n def fit(self, train_data, valid_data, assign_task_type = None):\n \n \"\"\"\n Parameters\n\n ----------\n\n train_data : {\n 'x':list[episode_file_path], \n 'y':list[label], \n 'l':list[seq_len], \n 'feat_n': n of feature space, \n 'label_n': n of label space\n }\n\n The input train samples dict.\n \n valid_data : {\n 'x':list[episode_file_path], \n 'y':list[label], \n 'l':list[seq_len], \n 'feat_n': n of feature space, \n 'label_n': n of label space\n }\n\n The input valid samples dict.\n\n assign_task_type: str (default = None)\n predifine task type to model mapping <feature, label>\n current support ['binary','multiclass','multilabel','regression']\n\n Returns\n\n -------\n\n self : object\n\n Fitted estimator.\n\n \"\"\"\n self.task_type = assign_task_type\n self._data_check([train_data, valid_data])\n train_reader = self._get_reader(train_data, 'train')\n valid_reader = self._get_reader(valid_data, 'valid')\n self._build_model()\n self._fit_model(train_reader, valid_reader)\n \n def load_model(self, \n loaded_epoch = '',\n config_file_path = '',\n model_file_path = ''):\n \"\"\"\n Parameters\n\n ----------\n\n loaded_epoch : str, loaded model name \n \n 
we save the model by <epoch_count>.epoch, latest.epoch, best.epoch\n\n Returns\n\n -------\n\n self : object\n\n loaded estimator.\n\n \"\"\"\n\n predictor_config = self._load_predictor_config(config_file_path)\n self.predictor = callPredictor(**predictor_config).to(self.device)\n self._load_model(loaded_epoch, model_file_path)\n \n\n def _args_check(self):\n \"\"\"\n \n Check whether args are valid and give tips\n \n \n \"\"\"\n assert isinstance(self.n_batchsize,int) and self.n_batchsize>0, \\\n 'fill in correct n_batchsize (int, >0)'\n assert isinstance(self.n_epoch,int) and self.n_epoch>0, \\\n 'fill in correct n_epoch (int, >0)'\n assert isinstance(self.learn_ratio,float) and self.learn_ratio>0., \\\n 'fill in correct learn_ratio (float, >0.)'\n assert isinstance(self.weight_decay,float) and self.weight_decay>=0., \\\n 'fill in correct weight_decay (float, >=0.)'\n assert isinstance(self.fc_size,int) and self.fc_size>0, \\\n 'fill in correct fc_size (int, >0)'\n assert isinstance(self.n_epoch_saved,int) and self.n_epoch_saved>0 and self.n_epoch_saved < self.n_epoch, \\\n 'fill in correct n_epoch_saved (int, >0 and <{0})'.format(self.n_epoch)\n assert isinstance(self.aggregate,str) and self.aggregate in ['sum','avg'], \\\n 'fill in correct aggregate (str, [\\'sum\\',\\'avg\\'])'\n assert isinstance(self.optimizer_name,str) and self.optimizer_name in ['adam'], \\\n 'fill in correct optimizer_name (str, [\\'adam\\'])'\n assert isinstance(self.use_gpu,bool), \\\n 'fill in correct use_gpu (bool)'\n assert isinstance(self.loss_name,str), \\\n 'fill in correct loss_name (str)'\n self.device = self._get_device()\n\n",

"# -*- coding: utf-8 -*-\n\n# Author: Zhi Qiao <[email protected]>\n\n# License: BSD 2 clause\n\nimport os\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom torch.nn import L1Loss\nfrom torch.nn import MSELoss\nfrom torch.nn import NLLLoss\nfrom torch.nn import BCELoss\n\nfrom torch.nn import Sigmoid\nfrom torch.nn import LogSigmoid\nfrom torch.nn import Softmax\nfrom torch.nn import LogSoftmax\n\nclass md_NNLoss(nn.Module):\n \n def __init__(self,\n reduction = 'sum'):\n super(md_NNLoss, self).__init__()\n self.reduction = reduction\n\n def forward(self, neg_log_hat_y, y):\n if self.reduction == 'sum':\n return -1 * torch.sum(neg_log_hat_y*y)\n elif self.reduction == 'mean':\n return -1 * torch.mean(neg_log_hat_y*y)\n\nloss_dict = {\n 'multilabel': {\n 'L1LossSigmoid': {'activate': Sigmoid, 'lossfunc': L1Loss},\n 'L1LossSoftmax': {'activate': Softmax, 'lossfunc': L1Loss},\n 'MSELossSigmoid': {'activate': Sigmoid, 'lossfunc': MSELoss},\n 'MSELossSoftmax': {'activate': Softmax, 'lossfunc': MSELoss},\n 'CELossSigmoid': {'activate': LogSigmoid, 'lossfunc': md_NNLoss},\n 'CELossSoftmax': {'activate': LogSoftmax, 'lossfunc': md_NNLoss}\n },\n 'binaryclass': {\n 'L1LossSigmoid': {'activate': Sigmoid, 'lossfunc': L1Loss},\n 'MSELossSigmoid': {'activate': Sigmoid, 'lossfunc': MSELoss},\n 'BCELossSigmoid': {'activate': Sigmoid, 'lossfunc': BCELoss}\n }\n}\n\nclass callLoss(nn.Module):\n \n def __init__(self,\n task = 'multilabel',\n loss_name = 'L1LossSigmoid',\n target_repl = False,\n target_repl_coef = 0,\n aggregate = 'sum'):\n super(callLoss, self).__init__()\n self.loss_fn = loss_dict[task][loss_name]['lossfunc'](reduction = aggregate)\n if 'Softmax' in loss_name:\n self.activate_fn = loss_dict[task][loss_name]['activate'](dim=-1)\n else:\n self.activate_fn = loss_dict[task][loss_name]['activate']()\n self.target_repl = target_repl \n self.target_repl_coef = target_repl_coef\n\n def forward(self, data):\n \"\"\"\n \n Parameters\n \n ----------\n data = {\n 'hat_y': shape (batchsize, n_label)\n \n 'y': shape (batchsize, n_label)\n \n 'mask': [optional] shape (batchsize, n_timestep)\n when target_repl is True\n \n 'all_hat_y': [optional] shape (batchsize, n_timestep, n_label) \n when target_repl is True\n }\n \n \n \"\"\"\n \n hat_y, y = data['hat_y'], data['y']\n n_sample, n_label = y.size()\n y = y.view(-1, n_label)\n hat_y = hat_y.view(-1, n_label)\n hat_y = self.activate_fn(hat_y)\n single_loss = self.loss_fn(hat_y, y)\n if self.target_repl:\n all_hat_y, mask = data['all_hat_y'], data['mask']\n _, n_timestep = mask.size()\n mask_flag = mask.unsqueeze(-1)\n all_hat_y = self.activate_fn(all_hat_y) * mask_flag\n all_hat_y = all_hat_y.view(-1, n_label)\n all_y = y.unsqueeze(1).repeat(1,n_timestep,1) * mask_flag\n all_y = all_y.view(-1, n_label)\n all_loss = self.loss_fn(all_hat_y, all_y)\n loss = (1-self.target_repl_coef) * single_loss + self.target_repl_coef * all_loss\n else:\n loss = single_loss\n return loss\n\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.functional.softmax",
"torch.nn.LSTM",
"torch.nn.ReLU",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.nn.Conv1d",
"torch.nn.MaxPool1d",
"torch.flatten",
"torch.nn.AdaptiveAvgPool1d",
"torch.nn.DataParallel",
"numpy.array",
"torch.autograd.Variable"
],
[
"torch.mean",
"torch.sum"
]
] |
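Among the torch calls listed for rcrnet.py, the least self-explanatory is the masked pooling in callPredictor.forward, where torch.sum collapses the visit axis after zeroing padded visits with the cur_M mask. The same step in isolation, with toy shapes:

    import torch

    n_case, n_visit, n_feat = 2, 4, 3
    rnn_x = torch.randn(n_case, n_visit, n_feat)   # stand-in LSTM outputs
    cur_M = torch.tensor([[1., 1., 0., 0.],        # case 0: 2 real visits
                          [1., 1., 1., 1.]])       # case 1: 4 real visits

    # broadcast the mask over features, zero the padding, sum over visits
    out_x = torch.sum(rnn_x * cur_M.unsqueeze(-1), 1)
    print(out_x.shape)                             # torch.Size([2, 3])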
nulinspiratie/Qcodes
|
[
"d050d38ac83f532523a39549c3247dfa6096a36e"
] |
[
"qcodes/instrument_drivers/AlazarTech/ATS.py"
] |
[
"import socket\nimport ctypes\nfrom ctypes import wintypes as wt\nimport logging\nimport numpy as np\nimport os\n\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.utils import validators\nfrom qcodes.instrument.parameter import Parameter, MultiParameter\n\n# TODO(damazter) (C) logging\n\n# these items are important for generalizing this code to multiple alazar cards\n# TODO(damazter) (W) remove 8 bits per sample requirement\n# TODO(damazter) (W) some alazar cards have a different number of channels :(\n# this driver only works with 2-channel cards\n\n# TODO(damazter) (S) tests to do:\n# acquisition that would overflow the board if measurement is not stopped\n# quickly enough. can this be solved by not reposting the buffers?\n\nlogger = logging.getLogger(__name__)\n\nclass AlazarTech_ATS(Instrument):\n \"\"\"\n This is the qcodes driver for Alazar data acquisition cards\n\n status: beta-version\n\n this driver is written with the ATS9870 in mind\n updates might/will be necessary for other versions of Alazar cards\n\n Args:\n\n name: name for this instrument, passed to the base instrument\n system_id: target system id for this instrument\n board_id: target board id within the system for this instrument\n dll_path: string contianing the path of the ATS driver dll\n\n \"\"\"\n # override dll_path in your init script or in the board constructor\n # if you have it somewhere else\n dll_path = 'C:\\\\WINDOWS\\\\System32\\\\ATSApi'\n\n # override channels in a subclass if needed\n channels = 2\n\n _success = 512\n\n _error_codes = {\n 513: 'ApiFailed',\n 514: 'ApiAccessDenied',\n 515: 'ApiDmaChannelUnavailable',\n 516: 'ApiDmaChannelInvalid',\n 517: 'ApiDmaChannelTypeError',\n 518: 'ApiDmaInProgress',\n 519: 'ApiDmaDone',\n 520: 'ApiDmaPaused',\n 521: 'ApiDmaNotPaused',\n 522: 'ApiDmaCommandInvalid',\n 523: 'ApiDmaManReady',\n 524: 'ApiDmaManNotReady',\n 525: 'ApiDmaInvalidChannelPriority',\n 526: 'ApiDmaManCorrupted',\n 527: 'ApiDmaInvalidElementIndex',\n 528: 'ApiDmaNoMoreElements',\n 529: 'ApiDmaSglInvalid',\n 530: 'ApiDmaSglQueueFull',\n 531: 'ApiNullParam',\n 532: 'ApiInvalidBusIndex',\n 533: 'ApiUnsupportedFunction',\n 534: 'ApiInvalidPciSpace',\n 535: 'ApiInvalidIopSpace',\n 536: 'ApiInvalidSize',\n 537: 'ApiInvalidAddress',\n 538: 'ApiInvalidAccessType',\n 539: 'ApiInvalidIndex',\n 540: 'ApiMuNotReady',\n 541: 'ApiMuFifoEmpty',\n 542: 'ApiMuFifoFull',\n 543: 'ApiInvalidRegister',\n 544: 'ApiDoorbellClearFailed',\n 545: 'ApiInvalidUserPin',\n 546: 'ApiInvalidUserState',\n 547: 'ApiEepromNotPresent',\n 548: 'ApiEepromTypeNotSupported',\n 549: 'ApiEepromBlank',\n 550: 'ApiConfigAccessFailed',\n 551: 'ApiInvalidDeviceInfo',\n 552: 'ApiNoActiveDriver',\n 553: 'ApiInsufficientResources',\n 554: 'ApiObjectAlreadyAllocated',\n 555: 'ApiAlreadyInitialized',\n 556: 'ApiNotInitialized',\n 557: 'ApiBadConfigRegEndianMode',\n 558: 'ApiInvalidPowerState',\n 559: 'ApiPowerDown',\n 560: 'ApiFlybyNotSupported',\n 561: 'ApiNotSupportThisChannel',\n 562: 'ApiNoAction',\n 563: 'ApiHSNotSupported',\n 564: 'ApiVPDNotSupported',\n 565: 'ApiVpdNotEnabled',\n 566: 'ApiNoMoreCap',\n 567: 'ApiInvalidOffset',\n 568: 'ApiBadPinDirection',\n 569: 'ApiPciTimeout',\n 570: 'ApiDmaChannelClosed',\n 571: 'ApiDmaChannelError',\n 572: 'ApiInvalidHandle',\n 573: 'ApiBufferNotReady',\n 574: 'ApiInvalidData',\n 575: 'ApiDoNothing',\n 576: 'ApiDmaSglBuildFailed',\n 577: 'ApiPMNotSupported',\n 578: 'ApiInvalidDriverVersion',\n 579: ('ApiWaitTimeout: operation did not finish during '\n 'timeout interval. 
Check your trigger.'),\n 580: 'ApiWaitCanceled',\n 581: 'ApiBufferTooSmall',\n 582: ('ApiBufferOverflow:rate of acquiring data > rate of '\n 'transferring data to local memory. Try reducing sample rate, '\n 'reducing number of enabled channels, increasing size of each '\n 'DMA buffer or increase number of DMA buffers.'),\n 583: 'ApiInvalidBuffer',\n 584: 'ApiInvalidRecordsPerBuffer',\n 585: ('ApiDmaPending:Async I/O operation was successfully started, '\n 'it will be completed when sufficient trigger events are '\n 'supplied to fill the buffer.'),\n 586: ('ApiLockAndProbePagesFailed:Driver or operating system was '\n 'unable to prepare the specified buffer for DMA transfer. '\n 'Try reducing buffer size or total number of buffers.'),\n 587: 'ApiWaitAbandoned',\n 588: 'ApiWaitFailed',\n 589: ('ApiTransferComplete:This buffer is last in the current '\n 'acquisition.'),\n 590: 'ApiPllNotLocked:hardware error, contact AlazarTech',\n 591: ('ApiNotSupportedInDualChannelMode:Requested number of samples '\n 'per channel is too large to fit in on-board memory. Try '\n 'reducing number of samples per channel, or switch to '\n 'single channel mode.')\n }\n\n _board_names = {\n 1: 'ATS850',\n 2: 'ATS310',\n 3: 'ATS330',\n 4: 'ATS855',\n 5: 'ATS315',\n 6: 'ATS335',\n 7: 'ATS460',\n 8: 'ATS860',\n 9: 'ATS660',\n 10: 'ATS665',\n 11: 'ATS9462',\n 12: 'ATS9434',\n 13: 'ATS9870',\n 14: 'ATS9350',\n 15: 'ATS9325',\n 16: 'ATS9440',\n 17: 'ATS9410',\n 18: 'ATS9351',\n 19: 'ATS9310',\n 20: 'ATS9461',\n 21: 'ATS9850',\n 22: 'ATS9625',\n 23: 'ATG6500',\n 24: 'ATS9626',\n 25: 'ATS9360',\n 26: 'AXI9870',\n 27: 'ATS9370',\n 28: 'ATU7825',\n 29: 'ATS9373',\n 30: 'ATS9416'\n }\n\n @classmethod\n def find_boards(cls, dll_path=None):\n \"\"\"\n Find Alazar boards connected\n\n Args:\n dll_path: (string) path of the Alazar driver dll\n\n Returns:\n list: list of board info for each connected board\n \"\"\"\n dll = ctypes.cdll.LoadLibrary(dll_path or cls.dll_path)\n\n system_count = dll.AlazarNumOfSystems()\n boards = []\n for system_id in range(1, system_count + 1):\n board_count = dll.AlazarBoardsInSystemBySystemID(system_id)\n for board_id in range(1, board_count + 1):\n boards.append(cls.get_board_info(dll, system_id, board_id))\n return boards\n\n @classmethod\n def get_board_info(cls, dll, system_id, board_id):\n \"\"\"\n Get the information from a connected Alazar board\n\n Args:\n dll (string): path of the Alazar driver dll\n system_id: id of the Alazar system\n board_id: id of the board within the alazar system\n\n Return:\n\n Dictionary containing\n\n - system_id\n - board_id\n - board_kind (as string)\n - max_samples\n - bits_per_sample\n \"\"\"\n\n # make a temporary instrument for this board, to make it easier\n # to get its info\n board = cls('temp', system_id=system_id, board_id=board_id,\n server_name=None)\n handle = board._handle\n board_kind = cls._board_names[dll.AlazarGetBoardKind(handle)]\n\n max_s, bps = board._get_channel_info(handle)\n return {\n 'system_id': system_id,\n 'board_id': board_id,\n 'board_kind': board_kind,\n 'max_samples': max_s,\n 'bits_per_sample': bps\n }\n\n def __init__(self, name, system_id=1, board_id=1, dll_path=None, **kwargs):\n super().__init__(name, **kwargs)\n self._ATS_dll = ctypes.cdll.LoadLibrary(dll_path or self.dll_path)\n\n self._handle = self._ATS_dll.AlazarGetBoardBySystemID(system_id,\n board_id)\n if not self._handle:\n raise Exception('AlazarTech_ATS not found at '\n 'system {}, board {}'.format(system_id, board_id))\n\n self.buffer_list = []\n\n # Some ATS 
models do not support a bwlimit. This flag defines if the\n # ATS supports a bwlimit or not. True by default.\n self._bwlimit_support = True\n\n # get channel info\n max_s, bps = self._get_channel_info(self._handle)\n self.add_parameter(name='bits_per_sample',\n set_cmd=None,\n initial_value=bps)\n self.add_parameter(name='bytes_per_sample',\n set_cmd=None,\n initial_value=int((bps + 7)//8))\n self.add_parameter(name='maximum_samples',\n set_cmd=None,\n initial_value=max_s)\n\n # Buffers can be pre-allocated using ATS.preallocate_buffers.\n # See docstring for details\n self._preallocated_buffers = []\n\n def preallocate_buffers(self, num_buffers: int, samples_per_buffer: int):\n \"\"\"Pre-allocate buffers for acquisition\n\n This method is especially useful when using 64-bit Python.\n In this case, the buffer memory address can exceed 32 bits, which\n causes an error because the ATS cannot handle such memory addresses.\n This issue appears more frequently for long acquisitions.\n\n If the buffers are pre-allocated at the start of a Python session,\n there is a much higher chance that a memory address is available below\n 32 bits (the lowest available memory address is chosen).\n\n Args:\n num_buffers: Number of buffers to pre-allocate.\n An error will be raised in an acquisition if the number of\n required buffers does not match the number of pre-allocated buffers.\n samples_per_buffer: Samples per buffer for each channel.\n An error will be raised in an acquisition if the required\n samples_per_buffer exceeds the value given here.\n This value should therefore be chosen well above the expected\n maximum number of samples per buffer.\n\n Returns:\n Pre-allocated buffer list\n \"\"\"\n assert all(not buffer._allocated for buffer in self._preallocated_buffers)\n\n self._preallocated_buffers = [\n Buffer(\n bits_per_sample=self.bits_per_sample(),\n samples_per_buffer=int(samples_per_buffer),\n number_of_channels=len(self.channels)\n )\n for _ in range(num_buffers)\n ]\n\n def get_idn(self):\n \"\"\"\n This methods gets the most relevant information of this instrument\n\n Returns:\n\n Dictionary containing\n\n - 'firmware': None\n - 'model': as string\n - 'serial': board serial number\n - 'vendor': 'AlazarTech',\n - 'CPLD_version': version of the CPLD\n - 'driver_version': version of the driver dll\n - 'SDK_version': version of the SDK\n - 'latest_cal_date': date of the latest calibration (as string)\n - 'memory_size': size of the memory in samples,\n - 'asopc_type': type of asopc (as decimal number),\n - 'pcie_link_speed': the speed of a single pcie link (in GB/s),\n - 'pcie_link_width': number of pcie links\n \"\"\"\n board_kind = self._board_names[\n self._ATS_dll.AlazarGetBoardKind(self._handle)]\n\n major = np.array([0], dtype=np.uint8)\n minor = np.array([0], dtype=np.uint8)\n revision = np.array([0], dtype=np.uint8)\n self._call_dll('AlazarGetCPLDVersion',\n self._handle,\n major.ctypes.data,\n minor.ctypes.data)\n cpld_ver = str(major[0])+\".\"+str(minor[0])\n\n # Use error_check=False, because in some cases the driver version\n # cannot be obtained.\n self._call_dll('AlazarGetDriverVersion',\n major.ctypes.data,\n minor.ctypes.data,\n revision.ctypes.data, error_check=False)\n driver_ver = str(major[0])+\".\"+str(minor[0])+\".\"+str(revision[0])\n\n self._call_dll('AlazarGetSDKVersion',\n major.ctypes.data,\n minor.ctypes.data,\n revision.ctypes.data)\n sdk_ver = str(major[0])+\".\"+str(minor[0])+\".\"+str(revision[0])\n\n value = np.array([0], dtype=np.uint32)\n 
self._call_dll('AlazarQueryCapability',\n self._handle, 0x10000024, 0, value.ctypes.data)\n serial = str(value[0])\n self._call_dll('AlazarQueryCapability',\n self._handle, 0x10000026, 0, value.ctypes.data)\n latest_cal_date = (str(value[0])[0:2] + \"-\" +\n str(value[0])[2:4] + \"-\" +\n str(value[0])[4:6])\n\n self._call_dll('AlazarQueryCapability',\n self._handle, 0x1000002A, 0, value.ctypes.data)\n memory_size = str(value[0])\n self._call_dll('AlazarQueryCapability',\n self._handle, 0x1000002C, 0, value.ctypes.data)\n asopc_type = str(value[0])\n\n # see the ATS-SDK programmer's guide\n # about the encoding of the link speed\n self._call_dll('AlazarQueryCapability',\n self._handle, 0x10000030, 0, value.ctypes.data)\n pcie_link_speed = str(value[0]*2.5/10)+\"GB/s\"\n self._call_dll('AlazarQueryCapability',\n self._handle, 0x10000031, 0, value.ctypes.data)\n pcie_link_width = str(value[0])\n\n\n return {'firmware': None,\n 'model': board_kind,\n 'serial': serial,\n 'vendor': 'AlazarTech',\n 'CPLD_version': cpld_ver,\n 'driver_version': driver_ver,\n 'SDK_version': sdk_ver,\n 'latest_cal_date': latest_cal_date,\n 'memory_size': memory_size,\n 'asopc_type': asopc_type,\n 'pcie_link_speed': pcie_link_speed,\n 'pcie_link_width': pcie_link_width}\n\n def config(self, clock_source=None, sample_rate=None, clock_edge=None,\n decimation=None, coupling=None, channel_range=None,\n impedance=None, bwlimit=None, trigger_operation=None,\n trigger_engine1=None, trigger_source1=None,\n trigger_slope1=None, trigger_level1=None,\n trigger_engine2=None, trigger_source2=None,\n trigger_slope2=None, trigger_level2=None,\n external_trigger_coupling=None, external_trigger_range=None,\n trigger_delay=None, timeout_ticks=None):\n \"\"\"\n configure the ATS board and set the corresponding parameters to the\n appropriate values.\n For documentation of the parameters, see ATS-SDK programmer's guide\n\n Args:\n clock_source:\n sample_rate:\n clock_edge:\n decimation:\n coupling:\n channel_range:\n impedance:\n bwlimit:\n trigger_operation:\n trigger_engine1:\n trigger_source1:\n trigger_slope1:\n trigger_level1:\n trigger_engine2:\n trigger_source2:\n trigger_slope2:\n trigger_level2:\n external_trigger_coupling:\n external_trigger_range:\n trigger_delay:\n timeout_ticks:\n\n Returns:\n None\n \"\"\"\n # region set parameters from args\n\n self._set_if_present('clock_source', clock_source)\n self._set_if_present('sample_rate', sample_rate)\n self._set_if_present('clock_edge', clock_edge)\n self._set_if_present('decimation', decimation)\n\n self._set_list_if_present('coupling', coupling)\n self._set_list_if_present('channel_range', channel_range)\n self._set_list_if_present('impedance', impedance)\n self._set_list_if_present('bwlimit', bwlimit)\n\n self._set_if_present('trigger_operation', trigger_operation)\n self._set_if_present('trigger_engine1', trigger_engine1)\n self._set_if_present('trigger_source1', trigger_source1)\n self._set_if_present('trigger_slope1', trigger_slope1)\n self._set_if_present('trigger_level1', trigger_level1)\n\n self._set_if_present('trigger_engine2', trigger_engine2)\n self._set_if_present('trigger_source2', trigger_source2)\n self._set_if_present('trigger_slope2', trigger_slope2)\n self._set_if_present('trigger_level2', trigger_level2)\n\n self._set_if_present('external_trigger_coupling',\n external_trigger_coupling)\n self._set_if_present('external_trigger_range',\n external_trigger_range)\n self._set_if_present('trigger_delay', trigger_delay)\n self._set_if_present('timeout_ticks', 
timeout_ticks)\n # endregion\n\n self._call_dll('AlazarSetCaptureClock',\n self._handle, self.clock_source, self.sample_rate,\n self.clock_edge, self.decimation)\n\n for i, ch in enumerate(self.channels):\n self._call_dll('AlazarInputControl',\n self._handle, 2**i, # Channel in binary format\n self.parameters['coupling' + ch],\n self.parameters['channel_range' + ch],\n self.parameters['impedance' + ch])\n if self._bwlimit_support:\n self._call_dll('AlazarSetBWLimit',\n self._handle, i - 1,\n self.parameters['bwlimit' + ch])\n\n self._call_dll('AlazarSetTriggerOperation',\n self._handle, self.trigger_operation,\n self.trigger_engine1, self.trigger_source1,\n self.trigger_slope1, self.trigger_level1,\n self.trigger_engine2, self.trigger_source2,\n self.trigger_slope2, self.trigger_level2)\n\n self._call_dll('AlazarSetExternalTrigger',\n self._handle, self.external_trigger_coupling,\n self.external_trigger_range)\n\n self._call_dll('AlazarSetTriggerDelay',\n self._handle, self.trigger_delay)\n\n self._call_dll('AlazarSetTriggerTimeOut',\n self._handle, self.timeout_ticks)\n\n # TODO(damazter) (W) config AUXIO\n\n def _get_channel_info(self, handle):\n bps = np.array([0], dtype=np.uint8) # bps bits per sample\n max_s = np.array([0], dtype=np.uint32) # max_s memory size in samples\n self._call_dll('AlazarGetChannelInfo',\n handle, max_s.ctypes.data, bps.ctypes.data)\n return max_s[0], bps[0]\n\n def acquire(self, mode=None, samples_per_record=None,\n records_per_buffer=None, buffers_per_acquisition=None,\n channel_selection=None, transfer_offset=None,\n external_startcapture=None, enable_record_headers=None,\n alloc_buffers=None, fifo_only_streaming=None,\n interleave_samples=None, get_processed_data=None,\n allocated_buffers=None, buffer_timeout=None,\n acquisition_controller=None):\n \"\"\"\n perform a single acquisition with the Alazar board, and set certain\n parameters to the appropriate values\n for the parameters, see the ATS-SDK programmer's guide\n\n Args:\n mode:\n samples_per_record:\n records_per_buffer:\n buffers_per_acquisition:\n channel_selection:\n transfer_offset:\n external_startcapture:\n enable_record_headers:\n alloc_buffers:\n fifo_only_streaming:\n interleave_samples:\n get_processed_data:\n allocated_buffers:\n buffer_timeout:\n acquisition_controller: An instance of an acquisition controller\n that handles the dataflow of an acquisition\n\n Returns:\n Whatever is given by acquisition_controller.post_acquire method\n \"\"\"\n # region set parameters from args\n self._set_if_present('mode', mode)\n self._set_if_present('samples_per_record', samples_per_record)\n self._set_if_present('records_per_buffer', records_per_buffer)\n self._set_if_present('buffers_per_acquisition',\n buffers_per_acquisition)\n self._set_if_present('channel_selection', channel_selection)\n self._set_if_present('transfer_offset', transfer_offset)\n self._set_if_present('external_startcapture', external_startcapture)\n self._set_if_present('enable_record_headers', enable_record_headers)\n self._set_if_present('alloc_buffers', alloc_buffers)\n self._set_if_present('fifo_only_streaming', fifo_only_streaming)\n self._set_if_present('interleave_samples', interleave_samples)\n self._set_if_present('get_processed_data', get_processed_data)\n self._set_if_present('allocated_buffers', allocated_buffers)\n self._set_if_present('buffer_timeout', buffer_timeout)\n\n # endregion\n self.mode._set_updated()\n mode = self.mode.get()\n if mode not in ('TS', 'NPT', 'CS'):\n raise Exception(\"Only the 'TS', 'CS', 
'NPT' modes are implemented \"\n                            \"at this point\")\n\n        # -----set final configurations-----\n\n        # Abort any previous measurement\n        self._call_dll('AlazarAbortAsyncRead', self._handle)\n\n        # Set record size for NPT mode\n        if mode in ['CS', 'NPT']:\n            pretriggersize = 0 # pretriggersize is 0 for NPT and CS always\n            post_trigger_size = self.samples_per_record._get_byte()\n            self._call_dll('AlazarSetRecordSize',\n                           self._handle, pretriggersize,\n                           post_trigger_size)\n\n        number_of_channels = len(self.channel_selection.get_latest())\n        samples_per_buffer = 0\n        buffers_per_acquisition = self.buffers_per_acquisition._get_byte()\n        samples_per_record = self.samples_per_record._get_byte()\n        acquire_flags = (self.mode._get_byte() |\n                         self.external_startcapture._get_byte() |\n                         self.enable_record_headers._get_byte() |\n                         self.alloc_buffers._get_byte() |\n                         self.fifo_only_streaming._get_byte() |\n                         self.interleave_samples._get_byte() |\n                         self.get_processed_data._get_byte())\n\n        # set acquisition parameters here for NPT, TS, CS mode\n        if mode == 'NPT':\n            records_per_buffer = self.records_per_buffer._get_byte()\n            records_per_acquisition = (\n                records_per_buffer * buffers_per_acquisition)\n            samples_per_buffer = samples_per_record * records_per_buffer\n\n            self._call_dll('AlazarBeforeAsyncRead',\n                           self._handle, self.channel_selection,\n                           self.transfer_offset, samples_per_record,\n                           records_per_buffer, records_per_acquisition,\n                           acquire_flags)\n\n        elif mode == 'TS':\n            if (samples_per_record % buffers_per_acquisition != 0):\n                logging.warning('buffers_per_acquisition is not a divisor of '\n                                'samples per record which it should be in '\n                                'TS mode, rounding down in samples per buffer '\n                                'calculation')\n            samples_per_buffer = int(samples_per_record /\n                                     buffers_per_acquisition)\n            if self.records_per_buffer._get_byte() != 1:\n                logging.warning('records_per_buffer should be 1 in TS mode, '\n                                'defaulting to 1')\n                self.records_per_buffer._set(1)\n            records_per_buffer = self.records_per_buffer._get_byte()\n\n            self._call_dll('AlazarBeforeAsyncRead',\n                           self._handle, self.channel_selection,\n                           self.transfer_offset, samples_per_buffer,\n                           self.records_per_buffer, buffers_per_acquisition,\n                           acquire_flags)\n\n        elif mode == 'CS':\n            if self.records_per_buffer._get_byte() != 1:\n                logging.warning('records_per_buffer should be 1 in CS mode, '\n                                'defaulting to 1')\n                self.records_per_buffer._set(1)\n\n            samples_per_buffer = samples_per_record\n\n            self._call_dll('AlazarBeforeAsyncRead',\n                           self._handle, self.channel_selection,\n                           self.transfer_offset, samples_per_buffer,\n                           self.records_per_buffer, buffers_per_acquisition,\n                           acquire_flags)\n\n        self.samples_per_record._set_updated()\n        self.records_per_buffer._set_updated()\n        self.buffers_per_acquisition._set_updated()\n        self.channel_selection._set_updated()\n        self.transfer_offset._set_updated()\n        self.external_startcapture._set_updated()\n        self.enable_record_headers._set_updated()\n        self.alloc_buffers._set_updated()\n        self.fifo_only_streaming._set_updated()\n        self.interleave_samples._set_updated()\n        self.get_processed_data._set_updated()\n\n        # create buffers for acquisition\n        self.clear_buffers(free_memory=(not self._preallocated_buffers))\n\n        # make sure that allocated_buffers <= buffers_per_acquisition and\n        # buffer acquisition is not in acquire indefinite mode (0x7FFFFFFF)\n        if (not self.buffers_per_acquisition._get_byte() == 0x7FFFFFFF) and \\\n                (self.allocated_buffers._get_byte() >\n                 self.buffers_per_acquisition._get_byte()):\n            logging.warning(\n                \"'allocated_buffers' should be smaller than or equal
to \"\n                \"'buffers_per_acquisition'. Defaulting 'allocated_buffers' to \"\n                + str(self.buffers_per_acquisition._get_byte()))\n            self.allocated_buffers._set(\n                self.buffers_per_acquisition._get_byte())\n\n        allocated_buffers = self.allocated_buffers._get_byte()\n\n        try:\n            if self._preallocated_buffers:\n                # Buffers are already pre-allocated\n                assert allocated_buffers <= len(self._preallocated_buffers)\n                max_samples = self._preallocated_buffers[0].samples_per_buffer\n                assert samples_per_buffer <= max_samples\n\n                # format the numpy array to a subset of the allocated memory\n                for buffer in self._preallocated_buffers[:allocated_buffers]:\n                    buffer.create_array(samples_per_buffer=samples_per_buffer)\n                    self.buffer_list.append(buffer)\n            else:\n                # Create new buffers\n                for k in range(allocated_buffers):\n                    buffer = Buffer(\n                        self.bits_per_sample(),\n                        samples_per_buffer,\n                        number_of_channels\n                    )\n                    self.buffer_list.append(buffer)\n        except Exception:\n            self.clear_buffers(free_memory=(not self._preallocated_buffers))\n            raise\n\n        # post buffers to Alazar\n        for buffer in self.buffer_list:\n            self._call_dll('AlazarPostAsyncBuffer',\n                           self._handle, buffer.addr, buffer.size_bytes)\n        self.allocated_buffers._set_updated()\n\n        # -----start capture here-----\n        acquisition_controller.pre_start_capture()\n        # call the startcapture method\n        self._call_dll('AlazarStartCapture', self._handle)\n\n        acquisition_controller.pre_acquire()\n        # buffer handling from acquisition\n        buffers_completed = 0\n        buffer_timeout = self.buffer_timeout._get_byte()\n        self.buffer_timeout._set_updated()\n\n        # Recycle buffers either if using continuous streaming mode or if\n        # more buffers are needed than the number of allocated buffers\n        buffer_recycling = \\\n            (self.buffers_per_acquisition._get_byte() == 0x7FFFFFFF) or \\\n            (self.buffers_per_acquisition._get_byte() >\n             self.allocated_buffers._get_byte())\n\n        while acquisition_controller.requires_buffer(buffers_completed):\n            buffer = self.buffer_list[buffers_completed % allocated_buffers]\n\n            self._call_dll('AlazarWaitAsyncBufferComplete',\n                           self._handle, buffer.addr, buffer_timeout)\n\n            # TODO(damazter) (C) last series of buffers must be handled\n            # exceptionally\n            # (and I want to test the difference) by changing buffer\n            # recycling for the last series of buffers\n\n            # if buffers must be recycled, extract data and repost them\n            # otherwise continue to next buffer\n            if buffer_recycling:\n                acquisition_controller.handle_buffer(buffer.buffer)\n                self._call_dll('AlazarPostAsyncBuffer',\n                               self._handle, buffer.addr, buffer.size_bytes)\n            buffers_completed += 1\n\n        # stop measurement here\n        self._call_dll('AlazarAbortAsyncRead', self._handle)\n\n        # -----cleanup here-----\n        # extract data if not yet done\n        if not buffer_recycling:\n            for buffer in self.buffer_list:\n                acquisition_controller.handle_buffer(buffer.buffer)\n\n        # free up memory if not using preallocated buffers\n        self.clear_buffers(free_memory=(not self._preallocated_buffers))\n\n        # check if all parameters are up to date\n        for p in self.parameters.values():\n            try:\n                p.get()\n            except (OSError, ctypes.ArgumentError):\n                if p.name == 'IDN':\n                    pass\n                else:\n                    raise\n\n        # return result\n        return acquisition_controller.post_acquire()\n\n    def triggered(self):\n        \"\"\"\n        Checks if the ATS has received at least one trigger.\n\n        Returns:\n            1 if there has been a trigger, 0 otherwise\n        \"\"\"\n        return self._call_dll('AlazarTriggered', self._handle,\n                              error_check=False)\n\n    def get_status(self):\n        \"\"\"\n        Read the current status of the ATS board.\n\n        Returns:\n            The return code of the AlazarGetStatus call\n        \"\"\"\n        return
self._call_dll('AlazarGetStatus', self._handle,\n                              error_check=False)\n\n    def _set_if_present(self, param_name, value):\n        if value is not None:\n            self.parameters[param_name]._set(value)\n\n    def _set_list_if_present(self, param_base, values):\n        if values is not None:\n            # Create list of identical values if a single value is given\n            if not isinstance(values, list):\n                values = [values] * len(self.channels)\n            for val, ch in zip(values, self.channels):\n                if param_base + ch in self.parameters.keys():\n                    self.parameters[param_base + ch]._set(val)\n\n    def _call_dll(self, func_name, *args, error_check=True):\n        \"\"\"\n        Execute a dll function `func_name`, passing it the given arguments\n\n        For each argument in the list\n        - If an arg is a parameter of this instrument, the parameter\n          value from `._get_byte()` is used. If the call succeeds, these\n          parameters will be marked as updated using their `._set_updated()`\n          method\n        - Otherwise the arg is used directly\n        \"\"\"\n        # create the argument list\n        args_out = []\n        update_params = []\n        for arg in args:\n            if isinstance(arg, AlazarParameter):\n                args_out.append(arg._get_byte())\n                update_params.append(arg)\n            else:\n                args_out.append(arg)\n\n        # run the function\n        func = getattr(self._ATS_dll, func_name)\n        return_code = func(*args_out)\n\n        # check for errors\n        if error_check and (return_code not in [self._success, 518]):\n            # TODO(damazter) (C) log error\n\n            argrepr = repr(args_out)\n            if len(argrepr) > 100:\n                argrepr = argrepr[:96] + '...]'\n\n            if return_code not in self._error_codes:\n                raise RuntimeError(\n                    'unknown error {} from function {} with args: {}'.format(\n                        return_code, func_name, argrepr))\n            raise RuntimeError(\n                'error {}: {} from function {} with args: {}'.format(\n                    return_code, self._error_codes[return_code], func_name,\n                    argrepr))\n        elif not error_check:\n            return return_code\n\n        # mark parameters updated (only after we've checked for errors)\n        for param in update_params:\n            param._set_updated()\n\n    def clear_buffers(self, free_memory=True):\n        \"\"\"\n        This method uncommits all buffers that were committed by the driver.\n        This method only has to be called when the acquisition crashes, otherwise\n        the driver will uncommit the buffers itself\n        :return: None\n        \"\"\"\n        if free_memory:\n            for b in self.buffer_list:\n                b.free_mem()\n        self.buffer_list = []\n\n    def signal_to_volt(self, channel, signal):\n        \"\"\"\n        convert a value from a buffer to an actual value in volts based on the\n        ranges of the channel\n\n        Args:\n            channel: number of the channel where the signal value came from\n            signal: the value that needs to be converted\n\n        Returns:\n            the corresponding value in volts\n        \"\"\"\n        # TODO(damazter) (S) check this\n        # TODO(damazter) (M) use byte value if range{channel}\n        return (((signal - 127.5) / 127.5) *\n                (self.parameters['channel_range' + str(channel)].get()))\n\n    def get_sample_rate(self):\n        \"\"\"\n        Obtain the effective sampling rate of the acquisition\n        based on clock speed and decimation\n\n        Returns:\n            the number of samples (per channel) per second\n        \"\"\"\n        if self.sample_rate.get() == 'EXTERNAL_CLOCK':\n            raise Exception('External clock is used, alazar driver '\n                            'could not determine sample speed.')\n\n        rate = self.sample_rate.get()\n        if rate == '1GHz_REFERENCE_CLOCK':\n            rate = 1e9\n\n        decimation = self.decimation.get()\n        if decimation > 0:\n            return rate / decimation\n        else:\n            return rate\n\n\nclass AlazarParameter(Parameter):\n    \"\"\"\n    This class represents one of the many parameters that are relevant for the\n    Alazar driver.
These parameters only have a private set method, because the values\n    are set by the Alazar driver. They do have a get function, which returns a\n    human-readable value. Internally the value is stored as an Alazar-readable\n    value.\n\n    These parameters also keep track of the up-to-dateness of their value. If\n    the private set function is called incorrectly, the parameter raises an\n    error when the get function is called, to warn the user that the value is\n    out of date.\n\n    Args:\n        name: see Parameter class\n        label: see Parameter class\n        unit: see Parameter class\n        instrument: see Parameter class\n        value: default value\n        byte_to_value_dict: dictionary that maps byte values (readable to the\n            alazar) to values that are readable to humans\n        vals: see Parameter class, should not be set if byte_to_value_dict is\n            provided\n    \"\"\"\n    def __init__(self, name=None, label=None, unit=None, instrument=None,\n                 value=None, byte_to_value_dict=None, vals=None,\n                 **kwargs):\n        if vals is None:\n            if byte_to_value_dict is None:\n                vals = validators.Anything()\n            else:\n                # TODO(damazter) (S) test this validator\n                vals = validators.Enum(*byte_to_value_dict.values())\n\n        super().__init__(name=name, label=label, unit=unit, vals=vals,\n                         instrument=instrument, **kwargs)\n        self.instrument = instrument\n        self._byte = None\n        self._uptodate_flag = False\n\n        # TODO(damazter) (M) check this block\n        if byte_to_value_dict is None:\n            self._byte_to_value_dict = TrivialDictionary()\n            self._value_to_byte_dict = TrivialDictionary()\n        else:\n            self._byte_to_value_dict = byte_to_value_dict\n            self._value_to_byte_dict = {\n                v: k for k, v in self._byte_to_value_dict.items()}\n\n        self._set(value)\n\n    def get_raw(self):\n        \"\"\"\n        This method returns the name of the value set for this parameter\n        :return: value\n        \"\"\"\n        # TODO(damazter) (S) test this exception\n        if self._uptodate_flag is False:\n            raise Exception('The value of this parameter (' + self.name +\n                            ') is not up to date with the actual value in '\n                            'the instrument.\\n'\n                            'Most probable cause is illegal usage of ._set() '\n                            'method of this parameter.\\n'\n                            'Don\\'t use private methods if you do not know '\n                            'what you are doing!')\n        return self._byte_to_value_dict[self._byte]\n\n    def _get_byte(self):\n        \"\"\"\n        This method gets the byte representation of the value of the parameter\n        :return: byte representation\n        \"\"\"\n        return self._byte\n\n    def _set(self, value):\n        \"\"\"\n        This method sets the value of this parameter.\n        It is private to ensure that all values in the instrument\n        are always up to date\n        :param value: the new value (e.g. 'NPT', 0.5, ...)\n        :return: None\n        \"\"\"\n\n        # TODO(damazter) (S) test this validation\n        self.validate(value)\n        self._byte = self._value_to_byte_dict[value]\n        self._uptodate_flag = False\n        self._save_val(value)\n        return None\n\n    def _set_updated(self):\n        \"\"\"\n        This method is used to keep track of which parameters are updated in the\n        instrument.
If the end-user starts messing with this function, things\n can go wrong.\n\n Do not use this function if you do not know what you are doing\n :return: None\n \"\"\"\n self._uptodate_flag = True\n\n\nclass Buffer:\n \"\"\"\n This class represents a single buffer used for the data acquisition\n\n Args:\n bits_per_sample: the number of bits needed to store a sample\n samples_per_buffer: the number of samples needed per buffer(per channel)\n number_of_channels: the number of channels that will be stored in the\n buffer\n \"\"\"\n logger = False\n\n def __init__(self, bits_per_sample, samples_per_buffer,\n number_of_channels):\n\n if os.name != 'nt':\n raise Exception(\"Buffer: only Windows supported at this moment\")\n\n self.samples_per_buffer = samples_per_buffer\n self.number_of_channels = number_of_channels\n\n self._allocated = True\n\n self.bytes_per_sample = int((bits_per_sample + 7)//8)\n self.np_sample_type = {1: np.uint8, 2: np.uint16}[self.bytes_per_sample]\n\n # try to allocate memory\n mem_commit = 0x1000\n page_readwrite = 0x4\n\n self.size_bytes = self.bytes_per_sample * samples_per_buffer * \\\n number_of_channels\n\n # for documentation please see:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa366887(v=vs.85).aspx\n # https://stackoverflow.com/questions/61590363/enforce-virtualalloc-address-less-than-32-bits-on-64-bit-machine\n VirtualAlloc = ctypes.windll.kernel32.VirtualAlloc\n VirtualAlloc.argtypes = [wt.LPVOID, ctypes.c_size_t, wt.DWORD, wt.DWORD]\n VirtualAlloc.restype = wt.LPVOID\n\n self.addr = VirtualAlloc(\n 0,\n ctypes.c_size_t(self.size_bytes), \n mem_commit, \n page_readwrite\n )\n\n # Log buffer information\n if self.logger:\n # Divide address by 32 bits. If larger than 1, this results in a BSOD\n address_bits = None if self.addr is None else round(self.addr/2**32, 3)\n message = (\n f'Created buffer '\n f'addr: {self.addr}, '\n f'addr/2**32: {address_bits}, '\n f'allocated: {self._allocated}, '\n f'bytes_per_sample: {self.bytes_per_sample}, '\n f'sample_type: {self.np_sample_type}, '\n f'size_bytes: {self.size_bytes}, '\n )\n if isinstance(self.logger, socket.socket):\n # Send message to a socket\n self.logger.send((message + '\\n').encode())\n else:\n # Write message to a file\n self.logger.write(message)\n self.logger.flush()\n\n if self.addr is None:\n self._allocated = False\n e = ctypes.windll.kernel32.GetLastError()\n raise Exception(\"Memory allocation error: \" + str(e))\n elif self.addr >> 32:\n raise Exception(\n 'Memory allcation address exceeds 32 bits. 
'\n 'Raising error to avoid BSOD'\n )\n\n self.buffer = self.create_array()\n pointer, read_only_flag = self.buffer.__array_interface__['data']\n\n def create_array(self, samples_per_buffer: int = None):\n \"\"\"Create a numpy array from (a subset of) the allocated memory\n\n Args:\n samples_per_buffer: Number of buffer samples.\n Must be less than or equal to the samples_per_buffer used to\n initialize the Buffer.\n If not set, will use the entire allocated memory\n\n Returns:\n Numpy buffer array\n \"\"\"\n if samples_per_buffer is not None:\n assert samples_per_buffer <= self.samples_per_buffer\n\n size_bytes = self.bytes_per_sample * samples_per_buffer * \\\n self.number_of_channels\n else:\n size_bytes = self.size_bytes\n\n ctypes_array = (ctypes.c_uint8 * size_bytes).from_address(self.addr)\n self.buffer = np.frombuffer(ctypes_array, dtype=self.np_sample_type)\n\n return self.buffer\n\n def free_mem(self, addr=None):\n \"\"\"\n uncommit memory allocated with this buffer object\n :return: None\n \"\"\"\n mem_release = 0x8000\n\n if addr is None:\n addr = self.addr\n\n # for documentation please see:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa366892(v=vs.85).aspx\n ctypes.windll.kernel32.VirtualFree.argtypes = [\n ctypes.c_void_p, ctypes.c_long, ctypes.c_long]\n ctypes.windll.kernel32.VirtualFree.restype = ctypes.c_int\n ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(addr), 0, mem_release)\n self._allocated = False\n\n def __del__(self):\n \"\"\"\n If python garbage collects this object, __del__ should be called and it\n is the last chance to uncommit the memory to prevent a memory leak.\n This method is not very reliable so users should not rely on this\n functionality\n :return:\n \"\"\"\n if self._allocated:\n self.free_mem()\n logging.warning(\n 'Buffer prevented memory leak; Memory released to Windows.\\n'\n 'Memory should have been released before buffer was deleted.')\n\n\nclass AcquisitionController(Instrument):\n \"\"\"\n This class represents all choices that the end-user has to make regarding\n the data-acquisition. this class should be subclassed to program these\n choices.\n\n The basic structure of an acquisition is:\n\n - call to AlazarTech_ATS.acquire internal configuration\n - call to acquisitioncontroller.pre_start_capture\n - Call to the start capture of the Alazar board\n - call to acquisitioncontroller.pre_acquire\n - loop over all buffers that need to be acquired\n dump each buffer to acquisitioncontroller.handle_buffer\n (only if buffers need to be recycled to finish the acquisiton)\n - dump remaining buffers to acquisitioncontroller.handle_buffer\n alazar internals\n - return acquisitioncontroller.post_acquire\n\n Attributes:\n _alazar: a reference to the alazar instrument driver\n \"\"\"\n def __init__(self, name, alazar_name, **kwargs):\n \"\"\"\n :param alazar_name: The name of the alazar instrument on the server\n :return: nothing\n \"\"\"\n super().__init__(name, **kwargs)\n self._alazar = self.find_instrument(alazar_name,\n instrument_class=AlazarTech_ATS)\n\n self._acquisition_settings = {}\n self._fixed_acquisition_settings = {}\n self.add_parameter(name=\"acquisition_settings\",\n get_cmd=lambda: self._acquisition_settings)\n\n # Names and shapes must have initial value, even through they will be\n # overwritten in set_acquisition_settings. 
If we don't do this, the\n # remoteInstrument will not recognize that it returns multiple values.\n self.add_parameter(name=\"acquisition\",\n parameter_class=ATSAcquisitionParameter,\n acquisition_controller=self)\n\n # Save bytes_per_sample received from ATS digitizer\n self._bytes_per_sample = self._alazar.bytes_per_sample() * 8\n\n def _get_alazar(self):\n \"\"\"\n returns a reference to the alazar instrument. A call to self._alazar is\n quicker, so use that if in need for speed\n :return: reference to the Alazar instrument\n \"\"\"\n return self._alazar\n\n def verify_acquisition_settings(self, **kwargs):\n \"\"\"\n Ensure that none of the fixed acquisition settings are overwritten\n Args:\n **kwargs: List of acquisition settings\n\n Returns:\n acquisition settings wwith fixed settings\n \"\"\"\n for key, val in self._fixed_acquisition_settings.items():\n if kwargs.get(key, val) != val:\n logging.warning('Cannot set {} to {}. Defaulting to {}'.format(\n key, kwargs[key], val))\n kwargs[key] = val\n return kwargs\n\n def get_acquisition_setting(self, setting):\n \"\"\"\n Obtain an acquisition setting for the ATS.\n It checks if the setting is in ATS_controller._acquisition_settings\n If not, it will retrieve the ATS latest parameter value\n\n Args:\n setting: acquisition setting to look for\n\n Returns:\n Value of the acquisition setting\n \"\"\"\n if setting in self._acquisition_settings.keys():\n return self._acquisition_settings[setting]\n else:\n # Must get latest value, since it may not be updated in ATS\n return self._alazar.parameters[setting].get_latest()\n\n def update_acquisition_settings(self, **kwargs):\n \"\"\"\n Updates acquisition settings after first verifying that none of the\n fixed acquisition settings are overwritten. Any pre-existing settings\n that are not overwritten remain.\n\n Args:\n **kwargs: acquisition settings\n\n Returns:\n None\n \"\"\"\n kwargs = self.verify_acquisition_settings(**kwargs)\n self._acquisition_settings.update(**kwargs)\n\n def set_acquisition_settings(self, **kwargs):\n \"\"\"\n Sets acquisition settings after first verifying that none of the\n fixed acquisition settings are overwritten. 
Any pre-existing settings\n        that are not overwritten are removed.\n\n        Args:\n            **kwargs: acquisition settings\n\n        Returns:\n            None\n        \"\"\"\n        kwargs = self.verify_acquisition_settings(**kwargs)\n        self._acquisition_settings = kwargs\n\n    def do_acquisition(self):\n        \"\"\"\n        Performs an acquisition using the acquisition settings\n        Returns:\n            The records returned by the acquisition\n        \"\"\"\n        records = self._alazar.acquire(acquisition_controller=self,\n                                       **self._acquisition_settings)\n        return records\n\n    def requires_buffer(self, buffers_completed):\n        \"\"\"\n        Check if more buffers need to be acquired\n        Args:\n            buffers_completed: number of buffers that have been handled so far\n        Returns:\n            True if more buffers are needed, False otherwise\n        \"\"\"\n        raise NotImplementedError(\n            'This method should be implemented in a subclass')\n\n    def segment_buffer(self, buffer, scale_voltages=True):\n        \"\"\"\n        Segments buffers into the distinct channels\n        Args:\n            buffer: 1D buffer array containing all channels\n            scale_voltages: Whether or not to scale data to actual volts\n        Returns:\n            buffer_segments: Dictionary with items channel_idx: channel_buffer\n        \"\"\"\n\n        buffer_segments = {}\n        for ch, ch_idx in enumerate(self.channel_selection):\n            buffer_slice = slice(ch * self.samples_per_record,\n                                 (ch + 1) * self.samples_per_record)\n            # TODO int16 conversion necessary but should be done earlier\n            buffer_segment = buffer[buffer_slice]\n\n            if scale_voltages:\n                # Convert data points from a uint16 to volts\n                ch_range = self._alazar.parameters['channel_range'+ch_idx]()\n                # Determine value corresponding to zero for unsigned int\n                mid_val = 2.**(self._bytes_per_sample-1)\n                buffer_segment = (buffer_segment - mid_val) / mid_val * ch_range\n\n            buffer_segments[ch_idx] = buffer_segment\n        return buffer_segments\n\n    def pre_start_capture(self):\n        \"\"\"\n        Use this method to prepare yourself for the data acquisition\n        The Alazar instrument will call this method right before\n        'AlazarStartCapture' is called\n        \"\"\"\n        raise NotImplementedError(\n            'This method should be implemented in a subclass')\n\n    def pre_acquire(self):\n        \"\"\"\n        This method is called immediately after 'AlazarStartCapture' is called\n        \"\"\"\n        raise NotImplementedError(\n            'This method should be implemented in a subclass')\n\n    def handle_buffer(self, buffer):\n        \"\"\"\n        This method should store or process the information that is contained\n        in the buffers obtained during the acquisition.\n\n        Args:\n            buffer: np.array with the data from the Alazar card\n\n        Returns:\n            something, it is ignored in any case\n        \"\"\"\n        raise NotImplementedError(\n            'This method should be implemented in a subclass')\n\n    def post_acquire(self):\n        \"\"\"\n        This method should return any information you want to save from this\n        acquisition.
The acquisition method from the Alazar driver will use\n this data as its own return value\n\n Returns:\n this function should return all relevant data that you want\n to get form the acquisition\n \"\"\"\n raise NotImplementedError(\n 'This method should be implemented in a subclass')\n\n\nclass ATSAcquisitionParameter(MultiParameter):\n def __init__(self, acquisition_controller=None, **kwargs):\n self.acquisition_controller = acquisition_controller\n super().__init__(snapshot_value=False,\n names=[''], shapes=[()], **kwargs)\n\n @property\n def names(self):\n if self.acquisition_controller is None or \\\n not hasattr(self.acquisition_controller, 'channel_selection')\\\n or self.acquisition_controller.channel_selection is None:\n return ['']\n else:\n return tuple([f'ch{ch}_signal' for ch in\n self.acquisition_controller.channel_selection])\n\n @names.setter\n def names(self, names):\n # Ignore setter since getter is extracted from acquisition controller\n pass\n\n @property\n def labels(self):\n return self.names\n\n @labels.setter\n def labels(self, labels):\n # Ignore setter since getter is extracted from acquisition controller\n pass\n\n @property\n def units(self):\n return ['V'] * len(self.names)\n\n @units.setter\n def units(self, units):\n # Ignore setter since getter is extracted from acquisition controller\n pass\n\n @property\n def shapes(self):\n if hasattr(self.acquisition_controller, 'average_mode'):\n average_mode = self.acquisition_controller.average_mode()\n\n if average_mode == 'point':\n shape = ()\n elif average_mode == 'trace':\n shape = (self.acquisition_controller.samples_per_record,)\n else:\n shape = (self.acquisition_controller.traces_per_acquisition(),\n self.acquisition_controller.samples_per_record)\n return tuple([shape] * self.acquisition_controller.number_of_channels)\n else:\n return tuple(() * len(self.names))\n\n @shapes.setter\n def shapes(self, shapes):\n # Ignore setter since getter is extracted from acquisition controller\n pass\n\n def get_raw(self):\n return self.acquisition_controller.do_acquisition()\n\n\nclass TrivialDictionary:\n \"\"\"\n This class looks like a dictionary to the outside world\n every key maps to this key as a value (lambda x: x)\n \"\"\"\n def __init__(self):\n pass\n\n def __getitem__(self, item):\n return item\n\n def __contains__(self, item):\n # this makes sure that this dictionary contains everything\n return True\n"
] |
[
[
"numpy.frombuffer",
"numpy.array"
]
] |
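Editor's note: the Alazar driver in the row above scales raw unsigned samples to volts in signal_to_volt via ((signal - 127.5) / 127.5) * channel_range. A minimal NumPy sketch of that scaling, assuming an 8-bit card; the channel range and sample values below are illustrative, not taken from the row:

import numpy as np

channel_range = 1.0  # volts; illustrative assumption, not from the dataset row
raw = np.array([0, 64, 127, 191, 255], dtype=np.uint8)  # hypothetical 8-bit samples

# Same scaling as the driver's signal_to_volt: the unsigned midpoint
# (127.5) maps to 0 V, the extremes to -/+ channel_range.
volts = ((raw - 127.5) / 127.5) * channel_range
print(volts.round(3).tolist())  # [-1.0, -0.498, -0.004, 0.498, 1.0]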
youngleox/gmu
|
[
"0ab963976098ce7861c462ddae136ac92edd9916",
"0ab963976098ce7861c462ddae136ac92edd9916"
] |
[
"cifar/models/oneshot.py",
"simsiam/main_simsiam.py"
] |
[
"import sys\nimport math\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom torch.nn.init import constant_\nfrom torch.nn.modules import Module\nfrom torch.nn import functional as F\n\ndef stats(x):\n with torch.no_grad():\n if x.dim() == 4:\n #print(x.shape)\n #print( x.mean(dim=(0,2,3)).shape )\n if x.shape[0] > 2:\n return torch.mean(x.mean(dim=0)),torch.mean(x.std(dim=0))\n else:\n return 0,1\n elif x.dim() == 2:\n if x.shape[0] > 2:\n return torch.mean(x.mean(dim=0)),torch.mean(x.std(dim=0))\n else:\n return 0,1\n else:\n \n return 0,1\n\nclass OneshotNormalizer2D(Module):\n\n def __init__(self,freq=math.inf):\n super().__init__()\n self.freq = freq\n self.mean = None\n self.std = None\n self.step = 0\n def forward(self, x):\n self.step += 1\n if self.mean is None or self.step % self.freq == 0:\n self.mean,self.std = stats(x)\n \n return (x - self.mean) / self.std ",
"#!/usr/bin/env python\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport builtins\nimport math\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nimport simsiam.loader\nimport simsiam.builder\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet50)')\nparser.add_argument('-j', '--workers', default=32, type=int, metavar='N',\n help='number of data loading workers (default: 32)')\nparser.add_argument('--epochs', default=100, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=512, type=int,\n metavar='N',\n help='mini-batch size (default: 512), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.05, type=float,\n metavar='LR', help='initial (base) learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum of SGD solver')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. 
This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\n# simsiam specific configs:\nparser.add_argument('--dim', default=2048, type=int,\n help='feature dimension (default: 2048)')\nparser.add_argument('--pred-dim', default=512, type=int,\n help='hidden dimension of the predictor (default: 512)')\nparser.add_argument('--fix-pred-lr', action='store_true',\n help='Fix learning rate for the predictor')\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n args.gpu = gpu\n\n # suppress printing if not master\n if args.multiprocessing_distributed and args.gpu != 0:\n def print_pass(*args):\n pass\n builtins.print = print_pass\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n torch.distributed.barrier()\n # create model\n print(\"=> creating model '{}'\".format(args.arch))\n model = simsiam.builder.SimSiam(\n models.__dict__[args.arch],\n args.dim, args.pred_dim)\n\n # infer learning rate before changing batch size\n init_lr = args.lr * args.batch_size / 256\n\n if args.distributed:\n # Apply SyncBN\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel 
will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n # comment out the following line for debugging\n raise NotImplementedError(\"Only DistributedDataParallel is supported.\")\n else:\n # AllGather implementation (batch shuffle, queue update, etc.) in\n # this code only supports DistributedDataParallel.\n raise NotImplementedError(\"Only DistributedDataParallel is supported.\")\n print(model) # print model after SyncBatchNorm\n\n # define loss function (criterion) and optimizer\n criterion = nn.CosineSimilarity(dim=1).cuda(args.gpu)\n\n if args.fix_pred_lr:\n optim_params = [{'params': model.module.encoder.parameters(), 'fix_lr': False},\n {'params': model.module.predictor.parameters(), 'fix_lr': True}]\n else:\n optim_params = model.parameters()\n\n optimizer = torch.optim.SGD(optim_params, init_lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n # MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709\n augmentation = [\n transforms.RandomResizedCrop(224, scale=(0.2, 1.)),\n transforms.RandomApply([\n transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened\n ], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomApply([simsiam.loader.GaussianBlur([.1, 2.])], p=0.5),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ]\n\n train_dataset = datasets.ImageFolder(\n traindir,\n simsiam.loader.TwoCropsTransform(transforms.Compose(augmentation)))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, init_lr, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n }, is_best=False, filename='checkpoint_{:04d}.pth.tar'.format(epoch))\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n 
data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, _) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images[0] = images[0].cuda(args.gpu, non_blocking=True)\n images[1] = images[1].cuda(args.gpu, non_blocking=True)\n\n # compute output and loss\n p1, p2, z1, z2 = model(x1=images[0], x2=images[1])\n loss = -(criterion(p1, z2).mean() + criterion(p2, z1).mean()) * 0.5\n\n losses.update(loss.item(), images[0].size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, init_lr, epoch, args):\n \"\"\"Decay the learning rate based on schedule\"\"\"\n cur_lr = init_lr * 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))\n for param_group in optimizer.param_groups:\n if 'fix_lr' in param_group and param_group['fix_lr']:\n param_group['lr'] = init_lr\n else:\n param_group['lr'] = cur_lr\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.no_grad"
],
[
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.load",
"torch.utils.data.DataLoader",
"torch.distributed.barrier",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.nn.CosineSimilarity",
"torch.optim.SGD",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
]
] |
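Editor's note: the main_simsiam.py script in the row above decays the learning rate with a half-cosine schedule (adjust_learning_rate) and infers the base rate as lr * batch_size / 256. A small self-contained sketch of that schedule; the init_lr of 0.1 is illustrative (0.05 scaled for a batch size of 512):

import math

def cosine_lr(init_lr, epoch, total_epochs):
    # Half-cosine decay from init_lr at epoch 0 towards 0 at the end,
    # mirroring adjust_learning_rate in main_simsiam.py.
    return init_lr * 0.5 * (1. + math.cos(math.pi * epoch / total_epochs))

for epoch in (0, 25, 50, 75, 99):
    print(epoch, round(cosine_lr(0.1, epoch, 100), 4))
# prints: 0 0.1, 25 0.0854, 50 0.05, 75 0.0146, 99 0.0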
1713mz/nanodet
|
[
"b446207c63f81dd4a2451728d141860544fbda99"
] |
[
"nanodet/model/backbone/shufflenetv2.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\nfrom ..module.activation import act_layers\n\nmodel_urls = {\n \"shufflenetv2_0.5x\": \"https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth\", # noqa: E501\n \"shufflenetv2_1.0x\": \"https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth\", # noqa: E501\n \"shufflenetv2_1.5x\": None,\n \"shufflenetv2_2.0x\": None,\n}\n\n\ndef channel_shuffle(x, groups):\n # type: (torch.Tensor, int) -> torch.Tensor\n batchsize, num_channels, height, width = x.data.size()\n channels_per_group = num_channels // groups\n\n # reshape\n x = x.view(batchsize, groups, channels_per_group, height, width)\n\n x = torch.transpose(x, 1, 2).contiguous()\n\n # flatten\n x = x.view(batchsize, -1, height, width)\n\n return x\n\n\nclass ShuffleV2Block(nn.Module):\n def __init__(self, inp, oup, stride, activation=\"ReLU\"):\n super(ShuffleV2Block, self).__init__()\n\n if not (1 <= stride <= 3):\n raise ValueError(\"illegal stride value\")\n self.stride = stride\n\n branch_features = oup // 2\n assert (self.stride != 1) or (inp == branch_features << 1)\n\n if self.stride > 1:\n self.branch1 = nn.Sequential(\n self.depthwise_conv(\n inp, inp, kernel_size=3, stride=self.stride, padding=1\n ),\n nn.BatchNorm2d(inp),\n nn.Conv2d(\n inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False\n ),\n nn.BatchNorm2d(branch_features),\n act_layers(activation),\n )\n else:\n self.branch1 = nn.Sequential()\n\n self.branch2 = nn.Sequential(\n nn.Conv2d(\n inp if (self.stride > 1) else branch_features,\n branch_features,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False,\n ),\n nn.BatchNorm2d(branch_features),\n act_layers(activation),\n self.depthwise_conv(\n branch_features,\n branch_features,\n kernel_size=3,\n stride=self.stride,\n padding=1,\n ),\n nn.BatchNorm2d(branch_features),\n nn.Conv2d(\n branch_features,\n branch_features,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False,\n ),\n nn.BatchNorm2d(branch_features),\n act_layers(activation),\n )\n\n @staticmethod\n def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):\n return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)\n\n def forward(self, x):\n if self.stride == 1:\n x1, x2 = x.chunk(2, dim=1)\n out = torch.cat((x1, self.branch2(x2)), dim=1)\n else:\n out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)\n\n out = channel_shuffle(out, 2)\n\n return out\n\n\nclass ShuffleNetV2(nn.Module):\n def __init__(\n self,\n model_size=\"1.5x\",\n out_stages=(2, 3, 4),\n with_last_conv=False,\n kernal_size=3,\n activation=\"ReLU\",\n pretrain=True,\n ):\n super(ShuffleNetV2, self).__init__()\n # out_stages can only be a subset of (2, 3, 4)\n assert set(out_stages).issubset((2, 3, 4))\n\n print(\"model size is \", model_size)\n\n self.stage_repeats = [4, 8, 4]\n self.model_size = model_size\n self.out_stages = out_stages\n self.with_last_conv = with_last_conv\n self.kernal_size = kernal_size\n self.activation = activation\n if model_size == \"0.5x\":\n self._stage_out_channels = [24, 48, 96, 192, 1024]\n elif model_size == \"1.0x\":\n self._stage_out_channels = [24, 116, 232, 464, 1024]\n elif model_size == \"1.5x\":\n self._stage_out_channels = [24, 176, 352, 704, 1024]\n elif model_size == \"2.0x\":\n self._stage_out_channels = [24, 244, 488, 976, 2048]\n else:\n raise NotImplementedError\n\n # building first layer\n input_channels = 3\n output_channels = self._stage_out_channels[0]\n self.conv1 = 
nn.Sequential(\n nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),\n nn.BatchNorm2d(output_channels),\n act_layers(activation),\n )\n input_channels = output_channels\n\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n stage_names = [\"stage{}\".format(i) for i in [2, 3, 4]]\n for name, repeats, output_channels in zip(\n stage_names, self.stage_repeats, self._stage_out_channels[1:]\n ):\n seq = [\n ShuffleV2Block(\n input_channels, output_channels, 2, activation=activation\n )\n ]\n for i in range(repeats - 1):\n seq.append(\n ShuffleV2Block(\n output_channels, output_channels, 1, activation=activation\n )\n )\n setattr(self, name, nn.Sequential(*seq))\n input_channels = output_channels\n output_channels = self._stage_out_channels[-1]\n if self.with_last_conv:\n conv5 = nn.Sequential(\n nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),\n nn.BatchNorm2d(output_channels),\n act_layers(activation),\n )\n self.stage4.add_module(\"conv5\", conv5)\n self._initialize_weights(pretrain)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.maxpool(x)\n output = []\n for i in range(2, 5):\n stage = getattr(self, \"stage{}\".format(i))\n x = stage(x)\n if i in self.out_stages:\n output.append(x)\n return tuple(output)\n\n def _initialize_weights(self, pretrain=False):\n print(\"init weights...\")\n for name, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n if \"first\" in name:\n nn.init.normal_(m.weight, 0, 0.01)\n else:\n nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0001)\n nn.init.constant_(m.running_mean, 0)\n if pretrain:\n url = model_urls[\"shufflenetv2_{}\".format(self.model_size)]\n if url is not None:\n pretrained_state_dict = model_zoo.load_url(url)\n print(\"=> loading pretrained model {}\".format(url))\n self.load_state_dict(pretrained_state_dict, strict=False)\n"
] |
[
[
"torch.nn.Sequential",
"torch.transpose",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.utils.model_zoo.load_url"
]
] |
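Editor's note: channel_shuffle in the shufflenetv2.py file above interleaves channels across groups with a view/transpose/view. A toy check of the permutation it produces; the 1x4x1x1 input is made up for illustration:

import torch

def channel_shuffle(x, groups):
    # view -> transpose -> flatten, as in nanodet's shufflenetv2.py
    b, c, h, w = x.size()
    x = x.view(b, groups, c // groups, h, w)
    x = torch.transpose(x, 1, 2).contiguous()
    return x.view(b, -1, h, w)

x = torch.arange(4.).reshape(1, 4, 1, 1)  # channels [0, 1, 2, 3]
print(channel_shuffle(x, 2).flatten().tolist())  # [0.0, 2.0, 1.0, 3.0]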
davesean/modular_semantic_segmentation
|
[
"5f9e34243915b862e8fef5e6195f1e29f4cebf50"
] |
[
"experiments/eval_images_simDisc.py"
] |
[
"from os import path, environ\nimport os\nimport glob\nimport sacred as sc\nimport cv2\nimport scipy.misc\nimport numpy as np\nimport tensorflow as tf\nimport zipfile\nfrom sacred.utils import apply_backspaces_and_linefeeds\nfrom experiments.utils import get_observer, load_data\nfrom xview.datasets import Cityscapes\nfrom experiments.evaluation import evaluate, import_weights_into_network\nfrom xview.datasets import get_dataset\nfrom xview.models import get_model\nfrom xview.settings import EXP_OUT, DATA_BASEPATH\nfrom tests.evaluationFunctions import computePRvalues, computeIOU, computePatchSSIM, ShannonEntropy\nimport sys\nimport shutil\nfrom sys import stdout\n\n\nclass Helper:\n name = 'A'\n\na = Helper()\nb = Helper()\n\ndef error_mask(segm_image, gt_image):\n mask_3d = (segm_image == gt_image)\n mask = np.logical_and(mask_3d[:,:,2],np.logical_and(mask_3d[:,:,0],mask_3d[:,:,1]))\n return ~mask\n\ndef create_directories(run_id, experiment):\n \"\"\"\n Make sure directories for storing diagnostics are created and clean.\n\n Args:\n run_id: ID of the current sacred run, you can get it from _run._id in a captured\n function.\n experiment: The sacred experiment object\n Returns:\n The path to the created output directory you can store your diagnostics to.\n \"\"\"\n root = EXP_OUT\n # create temporary directory for output files\n if not os.path.exists(root):\n os.makedirs(root)\n # The id of this experiment is stored in the magical _run object we get from the\n # decorator.\n output_dir = '{}/{}'.format(root, run_id)\n if os.path.exists(output_dir):\n # Directory may already exist if run_id is None (in case of an unobserved\n # test-run)\n shutil.rmtree(output_dir)\n os.mkdir(output_dir)\n\n # Tell the experiment that this output dir is also used for tensorflow summaries\n experiment.info.setdefault(\"tensorflow\", {}).setdefault(\"logdirs\", [])\\\n .append(output_dir)\n return output_dir\n\nex = sc.Experiment()\n# reduce output of progress bars\nex.captured_out_filter = apply_backspaces_and_linefeeds\nex.observers.append(get_observer())\n\[email protected]\ndef main(modelname, net_config, gan_config, disc_config, datasetSem, datasetGAN, datasetDisc, starting_weights, flag_measure, output_mat, flag_entropy, thresholds, start, _run):\n for key in gan_config:\n setattr(a, key, gan_config[key])\n for key in disc_config:\n setattr(b, key, disc_config[key])\n setattr(a,'EXP_OUT',EXP_OUT)\n setattr(a,'RUN_id',_run._id)\n setattr(b,'EXP_OUT',EXP_OUT)\n setattr(b,'RUN_id',_run._id)\n disc_data_path = os.path.join(datasetDisc['image_input_dir'],str(gan_config['checkpoint'])+\"_full\")\n data_id=str(gan_config['checkpoint'])\n setattr(b,'DATA_id',data_id)\n # Set up the directories for diagnostics\n output_dir = create_directories(_run._id, ex)\n\n # load the data for the data description\n data_desc = get_dataset(datasetSem['name'])\n\n model = get_model(modelname)\n net = model(data_description=data_desc.get_data_description(),\n output_dir=output_dir, **net_config)\n # net.import_weights(filepath=starting_weights)\n print(\"INFO: SemSegNet Imported weights succesfully\")\n\n GAN_graph = tf.Graph()\n with GAN_graph.as_default():\n # create the network\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)\n GAN_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n if gan_config['type'] == 'cascRef':\n dataGAN = get_dataset('cityscapes_cascGAN')\n cGAN_model = get_model('cascGAN')\n if a.checkpoint is not None:\n ckp = os.path.join(a.EXP_OUT,str(a.checkpoint))\n 
modelGAN = cGAN_model(GAN_sess,dataset_name='cityscapes_cascGAN',image_size=disc_config['input_image_size'],\n checkpoint_dir=output_dir,\n data_desc=dataGAN.get_data_description(),\n is_training=False, checkpoint=ckp, vgg_checkpoint=\"/cluster/work/riner/users/haldavid/Checkpoints/VGG_Model/imagenet-vgg-verydeep-19.mat\")\n else:\n # load the dataset class\n dataGAN = get_dataset(datasetGAN['name'])\n # data = data(**datasetGAN)\n cGAN_model = get_model('cGAN')\n modelGAN = cGAN_model(GAN_sess, checkpoint_dir=output_dir,\n data_desc=dataGAN.get_data_description(),\n feature_matching=gan_config['feature_matching'],\n checkpoint=os.path.join(a.EXP_OUT,str(a.checkpoint)),\n gen_type=gan_config['type'],use_grayscale=gan_config['use_grayscale'])\n print(\"INFO: Generative model imported weights succesfully\")\n\n Disc_graph = tf.Graph()\n with Disc_graph.as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)\n sessD = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n dataD = get_dataset(datasetDisc['name'])\n dataD = dataD(disc_data_path,**datasetDisc)\n disc_model = get_model('simDisc')\n\n disc_checkpoint = None\n if disc_config['checkpoint'] is not None:\n disc_checkpoint = os.path.join(a.EXP_OUT,str(disc_config['checkpoint']))\n modelDiff=disc_model(sess=sessD, checkpoint_dir=output_dir, pos_weight=disc_config['pos_weight'],\n data=dataD, arch=disc_config['arch'], use_grayscale=disc_config['use_grayscale'],\n checkpoint=disc_checkpoint, use_segm=disc_config['use_segm'],\n batch_size=disc_config['batch_size'],feature_extractor=os.path.join(a.EXP_OUT,str(a.checkpoint)))\n\n if disc_config['checkpoint'] is None:\n print(\"INFO: Begin training simDisc\")\n tmp = modelDiff.train(b)\n _run.info['simDisc_predictions'] = tmp\n _run.info['simDisc_mean_predictions'] = np.mean(tmp, axis=0)\n _run.info['simDisc_stdDev'] = np.std(tmp, axis=0)\n print(\"INFO: Finished training simDisc\")\n else:\n print(\"INFO: Init and loaded checpoint for simDisc\")\n\n if flag_measure:\n benchmarks = ['measure']\n else:\n benchmarks = ['wilddash','posneg','valid','measure']\n data_SemSeg = data_desc(**datasetSem)\n\n _run.info['thresholds'] = thresholds\n\n ###########################################################################\n # mapping from Deeplab classes to Adapnet classes\n original_labelinfo = {\n 0: {'name': 'road', 'mapping': 'road'},\n 1: {'name': 'sidewalk', 'mapping': 'sidewalk'},\n 2: {'name': 'building', 'mapping': 'building'},\n 3: {'name': 'wall', 'mapping': 'building'},\n 4: {'name': 'fence', 'mapping': 'fence'},\n 5: {'name': 'pole', 'mapping': 'pole'},\n 6: {'name': 'traffic light', 'mapping': 'void'},\n 7: {'name': 'traffic sign', 'mapping': 'traffic sign'},\n 8: {'name': 'vegetation', 'mapping': 'vegetation'},\n 9: {'name': 'terrain', 'mapping': 'vegetation'},\n 10: {'name': 'sky', 'mapping': 'sky'},\n 11: {'name': 'person', 'mapping': 'person'},\n 12: {'name': 'rider', 'mapping': 'person'},\n 13: {'name': 'car', 'mapping': 'vehicle'},\n 14: {'name': 'truck', 'mapping': 'vehicle'},\n 15: {'name': 'bus', 'mapping': 'vehicle'},\n 16: {'name': 'train', 'mapping': 'vehicle'},\n 17: {'name': 'motorcycle', 'mapping': 'vehicle'},\n 18: {'name': 'bicycle', 'mapping': 'bicycle'},\n 255: {'name': 'void', 'mapping': 'void'}\n }\n\n labelinfo = {\n 0: {'name': 'void', 'color': [0, 0, 0]},\n 1: {'name': 'sky', 'color': [70, 130, 180]},\n 2: {'name': 'building', 'color': [70, 70, 70]},\n 3: {'name': 'road', 'color': [128, 64, 128]},\n 4: {'name': 'sidewalk', 'color': 
[244, 35, 232]},\n 5: {'name': 'fence', 'color': [190, 153, 153]},\n 6: {'name': 'vegetation', 'color': [107, 142, 35]},\n 7: {'name': 'pole', 'color': [153, 153, 153]},\n 8: {'name': 'vehicle', 'color': [0, 0, 142]},\n 9: {'name': 'traffic sign', 'color': [220, 220, 0]},\n 10: {'name': 'person', 'color': [220, 20, 60]},\n 11: {'name': 'bicycle', 'color': [119, 11, 32]}\n }\n\n label_lookup = [next(i for i in labelinfo\n if labelinfo[i]['name'] == k['mapping'])\n for _, k in original_labelinfo.items()]\n\n base_path = path.join(DATA_BASEPATH, 'fishyscapes_newfog')\n if 'TMPDIR' in environ:\n print('INFO loading dataset into machine ... ')\n # first load the zipfile into a closer memory location, then load all the\n # images\n zip = zipfile.ZipFile(path.join(base_path, 'testset.zip'), 'r')\n localtmp = environ['TMPDIR']\n zip.extractall(localtmp)\n zip.close()\n base_path = localtmp\n\n print('DONE loading dataset into machine ... ')\n\n ###########################################################################\n\n set_size = 1000\n h_orig = 1024\n w_orig = 2048\n\n sub_size = 100\n\n semseg_path = \"/cluster/work/riner/users/blumh/fishyscapes_deeplab_predictions_newfog\"\n out_path = \"/cluster/work/riner/users/blumh/resultsDH\"\n\n for k in range(start,(start+2)):\n kb = k*sub_size\n if k>0:\n print('Done %d images' %(kb))\n stdout.flush()\n img_array = np.zeros((sub_size,256,256,3))\n segm_array = np.zeros((sub_size,256,256,3))\n for i in range(sub_size):\n img = cv2.imread(path.join(base_path,'testset', str(i+kb)+'_rgb.png'))\n dl_labels = np.expand_dims(cv2.imread(path.join(semseg_path,str(i+kb)+'_predict.png'))[:,:,0],axis=0)\n\n cs_labels = np.asarray(label_lookup, dtype='int32')[dl_labels]\n\n lookup = np.array([labelinfo[i]['color'] for i in range(max(labelinfo.keys()) + 1)]).astype(int)\n segm = np.array(lookup[cs_labels[:]]).astype('uint8')[...,::-1]\n\n\n #mask = cv2.imread(path.join(base_path, str(i)+'_mask.png'), cv2.IMREAD_ANYDEPTH)\n # blob['labels'] = cv2.imread(labels_filename, cv2.IMREAD_ANYDEPTH)\n # # apply label mapping\n # blob['labels'] = np.asarray(self.label_lookup, dtype='int32')[blob['labels']]\n\n img_array[i,...] = cv2.resize(img, (256, 256),interpolation=cv2.INTER_LINEAR)\n segm_array[i,...] 
= cv2.resize(segm[0,...], (256, 256),interpolation=cv2.INTER_NEAREST)\n\n with GAN_sess.as_default():\n with GAN_graph.as_default():\n synth_images = modelGAN.transform(a,segm_array)\n\n with sessD.as_default():\n with Disc_graph.as_default():\n simMat = modelDiff.transform(img_array, synth_images, segm_array)\n\n for i in range(sub_size):\n # filename = path.join(out_path,str(i+kb)+'_rgb.png')\n # cv2.imwrite(filename,cv2.resize(img_array[i,...], (2048, 1024),interpolation=cv2.INTER_LINEAR))\n # filename = path.join(out_path,str(i+kb)+'_segm.png')\n # cv2.imwrite(filename,cv2.resize(segm_array[i,...], (2048, 1024),interpolation=cv2.INTER_NEAREST))\n filename = path.join(out_path,str(i+kb)+'_dissim.png')\n cv2.imwrite(filename,simMat[i,...])\n filename = path.join(out_path,str(i+kb)+'_dissim.npy')\n np.save(filename,cv2.resize(simMat[i,...], (2048, 1024),interpolation=cv2.INTER_LINEAR))\n\n #\n # filename = os.path.join(output_dir,\"rgb\"+str(k)+\".png\")\n # cv2.imwrite(filename, img_array[0,...,::-1])\n # filename = os.path.join(output_dir,\"segm\"+str(k)+\".png\")\n # cv2.imwrite(filename, segm[0,...])\n # filename = os.path.join(output_dir,\"synth\"+str(k)+\".png\")\n # cv2.imwrite(filename, synth_images[0,...])\n # filename = os.path.join(output_dir,\"sim\"+str(k)+\".png\")\n # cv2.imwrite(filename, simMat[0,...]*255)\n\n\n\n\n\n\nif __name__ == '__main__':\n ex.run_commandline()\n # for some reason we have processes running in the background that won't stop\n # this is the only way to kill them\n os._exit(os.EX_OK)\n"
] |
[
[
"tensorflow.Graph",
"numpy.asarray",
"tensorflow.ConfigProto",
"numpy.std",
"tensorflow.GPUOptions",
"numpy.mean",
"numpy.array",
"numpy.logical_and",
"numpy.zeros"
]
] |
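A note on the fishyscapes script above: it colorizes a predicted label map by indexing a per-class color table with the whole integer label array at once (lookup[cs_labels]), so no per-pixel loop is needed. A minimal sketch of that vectorized lookup, using a made-up two-class palette rather than the script's labelinfo:

import numpy as np

# Toy palette in the same {id: {'name', 'color'}} shape as the script's labelinfo.
labelinfo = {0: {'name': 'road', 'color': [128, 64, 128]},
             1: {'name': 'sky', 'color': [70, 130, 180]}}

# Build an (n_classes, 3) table once ...
lookup = np.array([labelinfo[i]['color']
                   for i in range(max(labelinfo.keys()) + 1)]).astype('uint8')

# ... then index it with the entire label map; the result is one color per pixel.
labels = np.array([[0, 1], [1, 0]])  # a 2x2 predicted label map
rgb = lookup[labels]
print(rgb.shape)                     # (2, 2, 3)
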
sergeykochetkov/kidney-1st
|
[
"d4d543744a83a8b450af89f932a95a39b55fe893"
] |
[
"src/03_generate_pseudo_labels/03_02_pseudo_label_dataset_a_dib/get_config.py"
] |
[
"import torch\n\nVERSION = '03_02'\n\ndef get_config():\n config = {\n 'VERSION':VERSION,\n 'INPUT_PATH':'/mnt/750G/GIT/Kidney/data',\n 'OUTPUT_PATH':f'./result/{VERSION}/',\n 'external_data_path': '/mnt/750G/GIT/kidny_unlabeled_dib/DATASET_A_DIB',\n 'split_seed_list':[0],\n 'FOLD_LIST':[0,1,2,3],\n 'model_path':'../../02_train/result/01/',\n 'model_name':'seresnext101',\n 'val_idxs_list_path':'../../02_train/result/01/',\n\n 'num_classes':1,\n 'resolution':1024,\n 'input_resolution':320,\n 'deepsupervision':False, # always false for inference\n 'clfhead':False,\n 'clf_threshold':0.5,\n 'small_mask_threshold':0, \n 'mask_threshold':0.5,\n 'pad_size':256,\n\n 'tta':4,\n 'test_batch_size':12,\n\n 'FP16':False,\n 'num_workers':4,\n 'device':torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n }\n return config"
] |
[
[
"torch.cuda.is_available"
]
] |
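The get_config pattern above keeps every path and hyperparameter in one plain dict and resolves the torch device when the config is built. A minimal sketch of consuming such a config, assuming the file above is importable as get_config; the Linear model is a stand-in, not the project's segmentation network:

import torch
from get_config import get_config  # the file above

config = get_config()
device = config['device']  # cuda if available, else cpu

model = torch.nn.Linear(10, config['num_classes']).to(device)  # stand-in model
x = torch.randn(config['test_batch_size'], 10, device=device)
with torch.no_grad():
    y = model(x)
print(y.shape)  # torch.Size([12, 1]) with the config above
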
nguyenhoa93/pylidc
|
[
"8f3ef349b9573a894fac301ce8ccd9acbb9b8991"
] |
[
"pylidc/Annotation.py"
] |
[
"import os, warnings\nimport sqlalchemy as sq\nfrom sqlalchemy.orm import relationship\nfrom ._Base import Base\nfrom .Scan import Scan\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# For contour to boolean mask function.\nimport matplotlib.path as mplpath\n\n# For CT volume visualizer.\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.widgets import Slider, Button, CheckButtons\n\n# For diameter estimation.\nfrom scipy.spatial.distance import pdist,squareform\nfrom scipy.interpolate import RegularGridInterpolator\n\n# For 3D visualizer.\nfrom skimage.measure import mesh_surface_area\nfrom skimage.measure import marching_cubes_lewiner as marching_cubes\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom scipy.spatial import Delaunay\nfrom scipy.ndimage.morphology import distance_transform_edt as dtrans\n\n\nfeature_names = \\\n ('subtlety',\n 'internalStructure',\n 'calcification',\n 'sphericity',\n 'margin',\n 'lobulation',\n 'spiculation',\n 'texture',\n 'malignancy')\n\n_off_limits = ['id','scan_id','_nodule_id','scan'] + \\\n list(feature_names)\n\nviz3dbackends = ['matplotlib', 'mayavi']\n\nclass Annotation(Base):\n \"\"\"\n The Nodule model class holds the information from a single physicians \n annotation of a nodule >= 3mm class with a particular scan. A nodule \n has many contours, each of which refers to the contour drawn for \n nodule in each scan slice. \n\n Attributes\n ----------\n subtlety: int, range = {1,2,3,4,5}\n Difficulty of detection. Higher values indicate easier detection.\n\n 1. 'Extremely Subtle'\n 2. 'Moderately Subtle'\n 3. 'Fairly Subtle'\n 4. 'Moderately Obvious'\n 5. 'Obvious'\n\n internalStructure: int, range = {1,2,3,4}\n Internal composition of the nodule.\n\n 1. 'Soft Tissue'\n 2. 'Fluid'\n 3. 'Fat'\n 4. 'Air'\n\n calcification: int, range = {1,2,3,4,6}\n Pattern of calcification, if present.\n\n 1. 'Popcorn'\n 2. 'Laminated'\n 3. 'Solid'\n 4. 'Non-central'\n 5. 'Central'\n 6. 'Absent'\n\n sphericity: int, range = {1,2,3,4,5}\n The three-dimensional shape of the nodule in terms of its roundness.\n\n 1. 'Linear'\n 2. 'Ovoid/Linear'\n 3. 'Ovoid'\n 4. 'Ovoid/Round'\n 5. 'Round'\n\n margin: int, range = {1,2,3,4,5}\n Description of how well-defined the nodule margin is.\n\n 1. 'Poorly Defined'\n 2. 'Near Poorly Defined'\n 3. 'Medium Margin'\n 4. 'Near Sharp'\n 5. 'Sharp'\n\n lobulation: int, range = {1,2,3,4,5}\n The degree of lobulation ranging from none to marked\n\n 1. 'No Lobulation'\n 2. 'Nearly No Lobulation'\n 3. 'Medium Lobulation'\n 4. 'Near Marked Lobulation'\n 5. 'Marked Lobulation'\n\n spiculation: int, range = {1,2,3,4,5}\n The extent of spiculation present.\n\n 1. 'No Spiculation'\n 2. 'Nearly No Spiculation'\n 3. 'Medium Spiculation'\n 4. 'Near Marked Spiculation'\n 5. 'Marked Spiculation'\n\n texture: int, range = {1,2,3,4,5}\n Radiographic solidity: internal texture (solid, ground glass, \n or mixed). \n\n 1. 'Non-Solid/GGO'\n 2. 'Non-Solid/Mixed'\n 3. 'Part Solid/Mixed'\n 4. 'Solid/Mixed'\n 5. 'Solid'\n\n malignancy: int, range = {1,2,3,4,5}\n Subjective assessment of the likelihood of\n malignancy, assuming the scan originated from a 60-year-old male \n smoker. \n\n 1. 'Highly Unlikely'\n 2. 'Moderately Unlikely'\n 3. 'Indeterminate'\n 4. 'Moderately Suspicious'\n 5. 
'Highly Suspicious'\n\n Example\n -------\n A short usage example for the Annotation class::\n\n import pylidc as pl\n\n # Get the first annotation with spiculation value greater than 3.\n ann = pl.query(pl.Annotation)\\\\\n .filter(pl.Annotation.spiculation > 3).first()\n \n print(ann.spiculation)\n # => 4\n \n # Each nodule feature has a corresponding property \n # to print the semantic value.\n print(ann.Spiculation)\n # => Medium-High Spiculation\n \n ann = anns.first()\n print(\"%.2f, %.2f, %.2f\" % (ann.diameter,\n ann.surface_area,\n ann.volume))\n # => 17.98, 1221.40, 1033.70\n \"\"\"\n __tablename__ = 'annotations'\n id = sq.Column('id', sq.Integer, primary_key=True)\n scan_id = sq.Column(sq.Integer, sq.ForeignKey('scans.id'))\n scan = relationship('Scan', back_populates='annotations')\n _nodule_id = sq.Column('_nodule_id', sq.String)\n\n # Physician-assigned diagnostic attributes.\n subtlety = sq.Column('subtlety', sq.Integer)\n internalStructure = sq.Column('internalStructure', sq.Integer)\n calcification = sq.Column('calcification', sq.Integer)\n sphericity = sq.Column('sphericity', sq.Integer)\n margin = sq.Column('margin', sq.Integer)\n lobulation = sq.Column('lobulation', sq.Integer)\n spiculation = sq.Column('spiculation', sq.Integer)\n texture = sq.Column('texture', sq.Integer)\n malignancy = sq.Column('malignancy', sq.Integer)\n\n def __repr__(self):\n return \"Annotation(id=%d,scan_id=%d)\" % (self.id, self.scan_id)\n\n def __setattr__(self, name, value):\n if name in _off_limits:\n msg = \"Trying to assign read-only Annotation object attribute \\\n `%s` a value of `%s`.\" % (name,value)\n raise ValueError(msg)\n else:\n super(Annotation,self).__setattr__(name,value)\n\n ####################################\n # { Begin semantic attribute functions\n\n @property\n def Subtlety(self):\n \"\"\"Semantic interpretation of `subtlety` value as string.\"\"\"\n s = self.subtlety\n assert s in range(1,6), \"Subtlety score out of bounds.\"\n if s == 1: return 'Extremely Subtle'\n elif s == 2: return 'Moderately Subtle'\n elif s == 3: return 'Fairly Subtle'\n elif s == 4: return 'Moderately Obvious'\n elif s == 5: return 'Obvious'\n\n @property\n def InternalStructure(self):\n \"\"\"Semantic interpretation of `internalStructure` value as string.\"\"\"\n s = self.internalStructure\n assert s in range(1,5), \"Internal structure score out of bounds.\"\n if s == 1: return 'Soft Tissue'\n elif s == 2: return 'Fluid'\n elif s == 3: return 'Fat'\n elif s == 4: return 'Air'\n\n @property\n def Calcification(self):\n \"\"\"Semantic interpretation of `calcification` value as string.\"\"\"\n s = self.calcification\n assert s in range(1,7), \"Calcification score out of bounds.\"\n if s == 1: return 'Popcorn'\n elif s == 2: return 'Laminated'\n elif s == 3: return 'Solid'\n elif s == 4: return 'Non-central'\n elif s == 5: return 'Central'\n elif s == 6: return 'Absent'\n\n @property\n def Sphericity(self):\n \"\"\"Semantic interpretation of `sphericity` value as string.\"\"\"\n s = self.sphericity\n assert s in range(1,6), \"Sphericity score out of bounds.\"\n if s == 1: return 'Linear'\n elif s == 2: return 'Ovoid/Linear'\n elif s == 3: return 'Ovoid'\n elif s == 4: return 'Ovoid/Round'\n elif s == 5: return 'Round'\n\n @property\n def Margin(self):\n \"\"\"Semantic interpretation of `margin` value as string.\"\"\"\n s = self.margin\n assert s in range(1,6), \"Margin score out of bounds.\"\n if s == 1: return 'Poorly Defined'\n elif s == 2: return 'Near Poorly Defined'\n elif s == 3: return 
'Medium Margin'\n elif s == 4: return 'Near Sharp'\n elif s == 5: return 'Sharp'\n\n @property\n def Lobulation(self):\n \"\"\"Semantic interpretation of `lobulation` value as string.\"\"\"\n s = self.lobulation\n assert s in range(1,6), \"Lobulation score out of bounds.\"\n if s == 1: return 'No Lobulation'\n elif s == 2: return 'Nearly No Lobulation'\n elif s == 3: return 'Medium Lobulation'\n elif s == 4: return 'Near Marked Lobulation'\n elif s == 5: return 'Marked Lobulation'\n\n @property\n def Spiculation(self):\n \"\"\"Semantic interpretation of `spiculation` value as string.\"\"\"\n s = self.spiculation\n assert s in range(1,6), \"Spiculation score out of bounds.\"\n if s == 1: return 'No Spiculation'\n elif s == 2: return 'Nearly No Spiculation'\n elif s == 3: return 'Medium Spiculation'\n elif s == 4: return 'Near Marked Spiculation'\n elif s == 5: return 'Marked Spiculation'\n\n @property\n def Texture(self):\n \"\"\"Semantic interpretation of `texture` value as string.\"\"\"\n s = self.texture\n assert s in range(1,6), \"Texture score out of bounds.\"\n if s == 1: return 'Non-Solid/GGO'\n elif s == 2: return 'Non-Solid/Mixed'\n elif s == 3: return 'Part Solid/Mixed'\n elif s == 4: return 'Solid/Mixed'\n elif s == 5: return 'Solid'\n\n @property\n def Malignancy(self):\n \"\"\"Semantic interpretation of `malignancy` value as string.\"\"\"\n s = self.malignancy\n assert s in range(1,6), \"Malignancy score out of bounds.\"\n if s == 1: return 'Highly Unlikely'\n elif s == 2: return 'Moderately Unlikely'\n elif s == 3: return 'Indeterminate'\n elif s == 4: return 'Moderately Suspicious'\n elif s == 5: return 'Highly Suspicious'\n\n # } End attribute functions\n ####################################\n\n def feature_vals(self, return_str=False):\n \"\"\"\n Return all feature values as a numpy array in the order \n presented in `feature_names`.\n\n Parameters\n ----------\n return_str: bool, default=False\n If True, a list of strings is also returned, corresponding\n to the meaning of each numerical feature value.\n\n Return\n ------\n fvals[, fstrs]: array[, list of strings]\n `fvals` is an array of numerical values corresponding to the \n numerical feature values for the annotation. 
`fstrs` is a \n list of semantic string interpretations of the numerical \n values given in `fvals`.\n \"\"\"\n fvals = np.array([getattr(self,f) for f in feature_names])\n if return_str:\n caps = [f.title() for f in feature_names]\n k = caps.index('Internalstructure')\n caps[k] = 'InternalStructure'\n return fvals, [getattr(self, c) for c in caps]\n else:\n return fvals\n\n def print_formatted_feature_table(self):\n \"\"\"\n Print all feature values as a string table.\n \"\"\"\n fnames = feature_names\n fvals, fstrings = self.feature_vals(True)\n\n print('%-18s %-24s %-2s'%('Feature', 'Meaning','#'))\n print('%-18s %-24s %-2s' % ('-', '-', '-'))\n\n for i in range(len(fnames)):\n print('%-18s | %-24s | %-2d'%(fnames[i].title(), \n fstrings[i], fvals[i]))\n\n def bbox(self, pad=None):\n \"\"\"\n Returns a tuple of Python `slice` objects that can be used to index\n into the image volume corresponding to the extent of the\n (padded) bounding box.\n\n Parameters\n ----------\n pad: int, list of ints, or float, default=None\n * If None (default), then no padding is used.\n * If an integer is provided, then the bounding box is padded\n uniformly by this integer amount.\n * If a list of integers is provided, then it is of the form::\n\n [(i1,i2), (j1,j2), (k1,k2)]\n\n and indicates the pad amounts along each coordinate axis.\n * If a float is provided, then the slices are padded such\n that the bounding box occupies at least `pad` physical units\n (using the corresponding scan `pixel_spacing` and `slice_spacing`\n parameters). This means the returned Slice indices will\n yield a bounding box that is at least `pad` millimeters along\n each coordinate axis direction.\n\n Note\n ----\n In the various `pad` cases above, borders are handled so that if a \n pad beyond the image borders is requested, then it is set \n to the maximum (or minimum, depending on the direction)\n possible index.\n\n Return\n ------\n bb: 3-tuple of Python `slice` objects.\n `bb` is the corresponding bounding box (with desired padding) \n in the CT image volume. 
`bb[i]` is a slice corresponding\n to the the extent of the bounding box along the \n coordinate axis `i`.\n\n Example\n -------\n\n The example below illustrates the various `pad` argument types::\n\n import pylidc as pl\n \n ann = pl.query(pl.Annotation).first()\n vol = ann.scan.to_volume()\n \n print ann.bbox()\n # => (slice(151, 185, None), slice(349, 376, None), slice(44, 50, None))\n \n print(vol[ann.bbox()].shape)\n # => (34, 27, 6)\n \n print(vol[ann.bbox(pad=2)].shape)\n # => (38, 31, 10)\n \n print(vol[ann.bbox(pad=[(1,2), (3,0), (2,4)])].shape)\n # => (37, 30, 12)\n \n print(max(ann.bbox_dims()))\n # => 21.45\n \n print(vol[ann.bbox(pad=30.0)].shape)\n # => (48, 49, 12)\n \n print(ann.bbox_dims(pad=30.0))\n # => [30.55, 31.200000000000003, 33.0]\n \"\"\"\n # Error checking ...\n if pad is not None:\n if not isinstance(pad, (int, list, float)):\n raise TypeError(\"`pad` is incorrect type.\")\n if isinstance(pad, list):\n if len(pad) != 3:\n raise ValueError(\"`pad` list length should be 3.\")\n for p in pad:\n msg = \"`pad` list elements should be (int, int)\"\n if len(p) != 2:\n raise ValueError(msg)\n if not isinstance(p[0], int) or not isinstance(p[1], int):\n raise TypeError(msg)\n\n # The index limits for the scan.\n limits = [(0,511), (0,511), (0,self.scan.slice_zvals.shape[0]-1)]\n\n cmatrix = self.contours_matrix\n imin,jmin,kmin = cmatrix.min(axis=0)\n imax,jmax,kmax = cmatrix.max(axis=0)\n\n # Adding the padding for each respective case, handling the\n # borders as needed.\n if isinstance(pad, int):\n imin = max(imin-pad, limits[0][0])\n imax = min(imax+pad, limits[0][1])\n jmin = max(jmin-pad, limits[1][0])\n jmax = min(jmax+pad, limits[1][1])\n kmin = max(kmin-pad, limits[2][0])\n kmax = min(kmax+pad, limits[2][1])\n elif isinstance(pad, list):\n imin = max(imin-pad[0][0], limits[0][0])\n imax = min(imax+pad[0][1], limits[0][1])\n jmin = max(jmin-pad[1][0], limits[1][0])\n jmax = min(jmax+pad[1][1], limits[1][1])\n kmin = max(kmin-pad[2][0], limits[2][0])\n kmax = min(kmax+pad[2][1], limits[2][1])\n elif isinstance(pad, float):\n # In this instance, we compute the extend the limits\n # until the required physical size is met (or until we can \n # no long extend the index).\n rij = self.scan.pixel_spacing\n rk = self.scan.slice_spacing\n\n # Check if the desired bbox size is not smaller than is possible.\n if isinstance(pad, float):\n minsize = max(self.bbox_dims(pad=None))\n if pad < minsize:\n raise ValueError((\"Requested `bbox` size (%.4f mm) is \"\n \"less than minimal possible size \"\n \"(%.4f mm).\") % (pad, minsize))\n while (imax-imin)*rij < pad:\n imin -= 1 if imin > limits[0][0] else 0\n imax += 1 if imax < limits[0][1] else 0\n if imin == limits[0][0] and imax == limits[0][1]:\n break\n while (jmax-jmin)*rij < pad:\n jmin -= 1 if jmin > limits[1][0] else 0\n jmax += 1 if jmax < limits[1][1] else 0\n if jmin == limits[1][0] and jmax == limits[1][1]:\n break\n while (kmax-kmin)*rk < pad:\n kmin -= 1 if kmin > limits[2][0] else 0\n kmax += 1 if kmax < limits[2][1] else 0\n if kmin == limits[2][0] and kmax == limits[2][1]:\n break\n\n return (slice(imin,imax+1),\n slice(jmin,jmax+1),\n slice(kmin,kmax+1))\n\n\n def bbox_dims(self, pad=None):\n \"\"\"\n Return the physical dimensions of the nodule bounding box in \n millimeters along each coordinate axis.\n\n Parameters\n ----------\n pad: int, list, or float, default=None\n See :meth:`pylidc.Annotation.bbox` for a \n description of this argument.\n\n Return\n ------\n dims: ndarray, shape=(3,)\n `dims[i]` is 
the length in millimeters of the bounding box along\n the coordinate axis `i`.\n\n Example\n -------\n An example where we compare the bounding box volume vs the nodule\n volume::\n\n import pylidc as pl\n\n ann = pl.query(pl.Annotation).first()\n\n print(\"%.2f mm^3, %.2f mm^3\" % (ann.volume,\n np.prod(ann.bbox_dims())))\n # => 2439.30 mm^3, 5437.58 mm^3\n \"\"\"\n res = [self.scan.pixel_spacing,]*2 + [self.scan.slice_spacing]\n return np.array([(b.stop-1-b.start)*r \n for r,b in zip(res, self.bbox(pad=pad))])\n\n\n def bbox_matrix(self, pad=None):\n \"\"\"\n The `bbox` function returns a tuple of slices to be used to index\n into an image volume. On the other hand, `bbox_array` returns\n a 3x2 matrix where each row is the (start, stop) indices of the\n i, j, and k axes.\n\n Parameters\n ----------\n pad: int, list, or float\n See :meth:`pylidc.Annotation.bbox` for a \n description of this argument.\n\n Note\n ----\n The indices return by `bbox_array` are *inclusive*, whereas\n the indices of the slice objects in the tuple return by `bbox`\n are offset by +1 in the \"stop\" index.\n\n Return\n ------\n bb_mat: ndarray, shape=(3,2)\n `bb_mat[i]` is the stop and start indices (inclusive) of the \n bounding box along coordinate axis `i`.\n\n Example\n -------\n An example of the difference between `bbox` and `bbox_matrix`::\n\n import pylidc as pl\n ann = pl.query(pl.Annotation).first()\n \n bb = ann.bbox()\n bm = ann.bbox_matrix()\n \n print(all([bm[i,0] == bb[i].start for i in range(3)]))\n # => True\n \n print(all([bm[i,1]+1 == bb[i].stop for i in range(3)]))\n # => True\n \"\"\"\n return np.array([[sl.start, sl.stop-1] for sl in self.bbox(pad=pad)])\n\n\n @property\n def centroid(self):\n \"\"\"\n The center of mass of the nodule as determined by its \n radiologist-drawn contours.\n\n Example\n -------\n An example of plotting the centroid on a CT image slice::\n\n import pylidc as pl\n import matplotlib.pyplot as plt\n \n ann = pl.query(pl.Annotation).first()\n i,j,k = ann.centroid\n\n vol = ann.scan.to_volume()\n \n plt.imshow(vol[:,:,int(k)], cmap=plt.cm.gray)\n plt.plot(j, i, '.r', label=\"Nodule centroid\")\n plt.legend()\n plt.show()\n\n Return\n ------\n centr: ndarray, shape=(3,)\n `centr[i]` is the average index value of all contour index values\n for coordinate axis `i`.\n \"\"\"\n return self.contours_matrix.mean(axis=0)\n\n @property\n def diameter(self):\n \"\"\"\n Estimate the greatest axial plane diameter using the annotation's \n contours. This estimation does not currently account for cases \n where the diamter passes outside the boundary of the nodule, or \n through cavities within the nodule.\n \n Return\n ------\n diam: float\n The maximal diameter as float, accounting for the axial-plane \n resolution of the scan. 
The units are mm.\n \"\"\"\n greatest_diameter = -np.inf\n i,j,k = 0,0,1 # placeholders for max indices\n for c,contour in enumerate(self.contours):\n contour_array = contour.to_matrix()[:,:2]*self.scan.pixel_spacing\n\n # There's some edge cases where the contour consists only of \n # a single point, which we must ignore.\n if contour_array.shape[0]==1: continue\n \n # pdist computes the pairwise distances between the points.\n # squareform turns the condensed array into matrix where\n # entry i,j is ||point_i - point_j||.\n diameters = squareform(pdist(contour_array))\n diameter = diameters.max()\n\n if diameter > greatest_diameter:\n greatest_diameter = diameter\n i = c\n j,k = np.unravel_index(diameters.argmax(), diameters.shape)\n\n return greatest_diameter\n\n @property\n def surface_area(self):\n \"\"\"\n Estimate the surface area by summing the areas of a trianglation\n of the nodules surface in 3d. Returned units are mm^2.\n\n Return\n ------\n sa: float\n The estimated surface area in squared millimeters.\n \"\"\"\n mask = self.boolean_mask()\n mask = np.pad(mask, [(1,1), (1,1), (1,1)], 'constant') # Cap the ends.\n mask = mask.astype(np.float)\n\n rij = self.scan.pixel_spacing\n rk = self.scan.slice_thickness\n verts, faces, _, _ = marching_cubes(mask, 0.5, spacing=(rij, rij, rk))\n return mesh_surface_area(verts, faces)\n\n @property\n def volume(self):\n \"\"\"\n Estimate the volume of the annotated nodule, using the contour \n annotations. Green's theorem (via the shoelace formula) is first \n used to measure the area in each slice. This area is multiplied \n by the distance between slices to obtain a volume for each slice, \n which is then added or subtracted from the total volume, depending \n on if the inclusion value for the contour. \n \n The distance between slices is taken to be the distance from the \n midpoint between the current `image_z_position` and the \n `image_z_position` in one slice higher plus the midpoint between \n the current `image_z_position` and the `image_z_position` of one \n slice below. If the the `image_z_position` corresponds to an end \n piece, we use the distance between the current `image_z_posiition` \n and the `image_z_position` of one slice below or above for top or \n bottom, respectively. If the annotation only has one contour, we \n use the `slice_thickness` attribute of the scan.\n\n Return\n ------\n vol: float\n The estimated 3D volume of the annotated nodule. Units are cubic\n millimeters.\n \"\"\"\n volume = 0.\n zvals = np.unique([c.image_z_position for c in self.contours])\n\n # We pad a zval on the bottom that is the same distance from the\n # first zval to the second zval but below the first point. We do \n # the same thing for the top zval.\n if len(self.contours) != 1:\n zlow = zvals[ 0] - (zvals[1]-zvals[0])\n zhigh = zvals[-1] + (zvals[-1]-zvals[-2])\n zvals = np.r_[zlow, zvals, zhigh]\n else:\n zvals = None\n\n for i,contour in enumerate(self.contours):\n contour_array = contour.to_matrix() * self.scan.pixel_spacing\n x = contour_array[:,0]\n y = contour_array[:,1]\n # \"Shoelace\" formula for area.\n area = 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))\n \n if zvals is not None:\n j = np.argmin(np.abs(contour.image_z_position-zvals))\n spacing_z = 0.5*(zvals[j+1]-zvals[j-1])\n else:\n spacing_z = self.scan.slice_thickness\n\n volume += (1. if contour.inclusion else -1.) 
* area * spacing_z\n return volume\n\n def visualize_in_3d(self, edgecolor='0.2', cmap='viridis',\n step=1, figsize=(5,5), backend='matplotlib'):\n \"\"\"\n Visualize in 3d a triangulation of the nodule's surface.\n\n Parameters\n ----------\n edgecolor: string color or rgb 3-tuple\n Sets edgecolors of triangulation.\n Ignored if backend != matplotlib.\n\n cmap: matplotlib colormap string.\n Sets the facecolors of the triangulation.\n See `matplotlib.cm.cmap_d.keys()` for all available.\n Ignored if backend != matplotlib.\n\n step: int, default=1\n The `step_size` parameter for the skimage marching_cubes function.\n Bigger values are quicker, but yield coarser surfaces.\n\n figsize: tuple, default=(5,5)\n Figure size for the displayed volume.\n\n backend: string\n The backend for visualization. Default is matplotlib.\n Execute `from pylidc.Annotation import viz3dbackends` to\n see available backends.\n\n Example\n -------\n A short example::\n\n ann = pl.query(pl.Annotation).first()\n ann.visualize_in_3d(edgecolor='green', cmap='autumn')\n \"\"\"\n if backend not in viz3dbackends:\n raise ValueError(\"backend should be in %s.\" % viz3dbackends)\n\n if backend == 'matplotlib':\n if cmap not in plt.cm.cmap_d.keys():\n raise ValueError(\"Invalid `cmap`. See `plt.cm.cmap_d.keys()`.\")\n\n # Pad to cap the ends for masks that hit the edge.\n mask = self.boolean_mask(pad=[(1,1), (1,1), (1,1)]) \n\n rij = self.scan.pixel_spacing\n rk = self.scan.slice_thickness\n\n if backend == 'matplotlib':\n verts, faces, _, _= marching_cubes(mask.astype(np.float), 0.5,\n spacing=(rij, rij, rk),\n step_size=step)\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n\n t = np.linspace(0, 1, faces.shape[0])\n mesh = Poly3DCollection(verts[faces], \n edgecolor=edgecolor,\n facecolors=plt.cm.cmap_d[cmap](t))\n ax.add_collection3d(mesh)\n\n ceil = max(self.bbox_dims(pad=[(1,1), (1,1), (1,1)]))\n ceil = int(np.round(ceil))\n \n ax.set_xlim(0, ceil)\n ax.set_xlabel('length (mm)')\n\n ax.set_ylim(0, ceil)\n ax.set_ylabel('length (mm)')\n\n ax.set_zlim(0, ceil)\n ax.set_zlabel('length (mm)')\n\n plt.tight_layout()\n plt.show()\n elif backend == 'mayavi':\n try:\n from mayavi import mlab\n sf = mlab.pipeline.scalar_field(mask.astype(np.float))\n sf.spacing = [rij, rij, rk]\n mlab.pipeline.iso_surface(sf, contours=[0.5])\n mlab.show()\n except ImportError:\n print(\"Mayavi could not be imported. Is it installed?\")\n\n\n def visualize_in_scan(self, verbose=True):\n \"\"\"\n Engage an interactive visualization of the slices of the scan \n along with scan and annotation information.\n \n The visualization begins (but is not limited to) the first slice \n where the nodule occurs (according to the annotation). 
Annotation\n contours are plotted on top of the images \n for visualization and can be toggled on and off, using an interactive \n check mark utility.\n\n Parameters\n ----------\n verbose: bool, default=True\n Turn the image loading statement on/off.\n \"\"\"\n images = self.scan.load_all_dicom_images(verbose)\n \n # Preload contours and sort them by z pos.\n contours = sorted(self.contours, key=lambda c: c.image_z_position)\n fnames = self.scan.sorted_dicom_file_names.split(',')\n index_of_contour = [fnames.index(c.dicom_file_name) for c in contours]\n\n fig = plt.figure(figsize=(16,8))\n\n min_slice = min(index_of_contour)\n max_slice = max(index_of_contour)\n current_slice = min_slice\n\n ax_image = fig.add_axes([0.5,0.0,0.5,1.0])\n img = ax_image.imshow(images[current_slice].pixel_array,\n cmap=plt.cm.gray)\n\n contour_lines = []\n # We draw all the contours initially and set the visibility\n # to False. This works better than trying create and destroy\n # plots every time we update the image.\n for i,c in enumerate(contours):\n arr = c.to_matrix()\n cc, = ax_image.plot(arr[:,1], arr[:,0], '-r')\n cc.set_visible(i==0) # Set the first contour visible.\n contour_lines.append( cc )\n ax_image.set_xlim(-0.5,511.5); ax_image.set_ylim(511.5,-0.5)\n ax_image.axis('off')\n \n # Add the scan info table\n ax_scan_info = fig.add_axes([0.1, 0.76, 0.3, 0.15])\n ax_scan_info.set_facecolor('w')\n scan_info_table = ax_scan_info.table(\n cellText=[\n ['Patient ID:', self.scan.patient_id],\n ['Slice thickness:', '%.3f mm' % self.scan.slice_thickness],\n ['Pixel spacing:', '%.3f mm'%self.scan.pixel_spacing],\n ['Manufacturer:', images[current_slice].Manufacturer],\n ['Model name:', images[current_slice].ManufacturerModelName],\n ['Convolution kernel:', images[current_slice].ConvolutionKernel],\n ],\n loc='center', cellLoc='left'\n )\n # Remove the cell borders.\n # It Seems like there should be an easier way to do this...\n for cell in scan_info_table.properties()['children']:\n cell.set_color('w')\n\n ax_scan_info.set_title('Scan Info')\n ax_scan_info.set_xticks([])\n ax_scan_info.set_yticks([])\n\n # Add annotations / features table.\n ax_annotation_info = fig.add_axes([0.1, 0.45, 0.3, 0.25])\n ax_annotation_info.set_facecolor('w')\n\n # Create the rows to be displayed in the annotations table.\n cell_text = []\n for f in feature_names:\n row = []\n fname = f.capitalize()\n if fname.startswith('Int'):\n fname = 'InternalStructure'\n\n row.append(fname)\n row.append(getattr(self,fname))\n row.append(getattr(self,f))\n\n cell_text.append(row)\n\n annotation_info_table = ax_annotation_info.table(\n cellText=cell_text,\n loc='center', cellLoc='left', colWidths=[0.45,0.45,0.1]\n )\n\n # Again, remove cell borders.\n for cell in annotation_info_table.properties()['children']:\n cell.set_color('w')\n\n ax_annotation_info.set_title('Annotation Info')\n ax_annotation_info.set_xticks([])\n ax_annotation_info.set_yticks([])\n\n # Add the checkbox for turning contours on / off.\n ax_contour_checkbox = fig.add_axes([0.1, 0.25, 0.1, 0.15])\n ax_contour_checkbox.set_facecolor('w')\n contour_checkbox = CheckButtons(ax_contour_checkbox,\n ('Show Contours',), (True,))\n contour_checkbox.is_checked = True\n\n # Add the widgets.\n ax_slice = fig.add_axes([0.1, 0.1, 0.3, 0.05])\n ax_slice.set_facecolor('w')\n txt = 'Z: %.3f'%float(images[current_slice].ImagePositionPatient[-1]) \n sslice = Slider(ax_slice,\n txt,\n 0,\n len(images)-1,\n valinit=current_slice,\n valfmt=u'Slice: %d')\n\n def update(_):\n # Update image 
itself.\n current_slice = int(sslice.val)\n img.set_data(images[current_slice].pixel_array)\n txt = 'Z: %.3f'\n txt = txt % float(images[current_slice].ImagePositionPatient[-1])\n sslice.label.set_text(txt)\n\n if contour_checkbox.is_checked:\n for i,c in enumerate(contour_lines):\n flag = ((index_of_contour[i] == current_slice) and \n (current_slice >= min_slice) and\n (current_slice <= max_slice))\n # Set contour visible if flag is True.\n c.set_visible(flag)\n else:\n for c in contour_lines: c.set_visible(False)\n\n fig.canvas.draw_idle()\n\n def update_contours(_):\n contour_checkbox.is_checked = not contour_checkbox.is_checked\n update(None) # update requires an argument.\n\n sslice.on_changed(update)\n contour_checkbox.on_clicked(update_contours)\n\n plt.show()\n\n @property\n def contour_slice_zvals(self):\n \"\"\"An array of unique z-coordinates for the contours.\"\"\"\n return np.sort([c.image_z_position for c in self.contours]) \n\n @property\n def contour_slice_indices(self):\n \"\"\"\n Returns an array of indices into the scan where each contour\n belongs. An example should clarify::\n\n import pylidc as pl\n \n ann = pl.query(pl.Annotation)\n \n zvals = ann.contour_slice_zvals\n kvals = ann.contour_slice_indices\n scan_zvals = ann.scan.slice_zvals\n \n for k,z in zip(kvals, zvals):\n # the two z values should the same (up to machine precision)\n print(k, z, scan_zvals[k]) \n \"\"\"\n return np.sort([c.image_k_position for c in self.contours])\n\n @property\n def contours_matrix(self):\n \"\"\"\n All the contour index values a 3D numpy array.\n \"\"\"\n return np.vstack([c.to_matrix(include_k=True)\n for c in sorted(self.contours,\n key=lambda c: c.image_z_position)])\n\n def boolean_mask(self, pad=None, bbox=None, include_contour_points=False):\n \"\"\"\n A boolean volume where 1 indicates nodule and 0 indicates\n non-nodule. The `mask` volume covers the extent of the voxels\n in the image volume given by `annotation.bbox`, i.e., the `mask`\n volume would be placed in the full image volume according to\n the `bbox` attribute.\n\n Parameters\n ----------\n pad: int, list, or float, default=None\n See :meth:`pylidc.Annotation.bbox` for a \n description of this argument.\n\n bbox: 3x2 NumPy array, default=None\n If `bbox` is provided, then `pad` is ignored. This argument allows\n for more fine-tuned control of placement of the mask in a volume,\n or for pre-computation of bbox when working with multiple \n Annotation object.\n\n Example\n -------\n An example::\n\n import pylidc as pl\n import matplotlib.pyplot as plt\n \n ann = pl.query(pl.Annotation).first()\n vol = ann.scan.to_volume()\n \n mask = ann.boolean_mask()\n bbox = ann.bbox()\n \n print(\"Avg HU inside nodule: %.1f\" % vol[bbox][mask].mean())\n # => Avg HU inside nodule: -280.0\n\n print(\"Avg HU outside nodule: %.1f\" % vol[bbox][~mask].mean())\n # => Avg HU outside nodule: -732.2\n \"\"\"\n bb = self.bbox_matrix(pad=pad) if bbox is None else bbox\n\n czs = self.contour_slice_zvals\n cks = self.contour_slice_indices\n\n zs = self.scan.slice_zvals\n zs = zs[cks[0]:cks[-1]+1]\n\n # Lambda to map a z-value to its appropriate index in the volume.\n z_to_index = lambda z: dict(zip(czs,cks))[z] - bb[2,0]#cks[0]\n\n # Get dimensions, initialize mask.\n ni,nj,nk = np.diff(bb, axis=1).astype(int)[:,0] + 1\n mask = np.zeros((ni,nj,nk), dtype=np.bool)\n\n # We check if these points are enclosed within each contour \n # for a given slice. 
`test_points` is a list of image coordinate \n # points, offset by the bounding box.\n ii,jj = np.indices(mask.shape[:2])\n test_points = bb[:2,0] + np.c_[ii.flatten(), jj.flatten()]\n\n # First we \"turn on\" pixels enclosed by inclusion contours.\n for contour in self.contours:\n if contour.inclusion:\n zi = z_to_index(contour.image_z_position)\n C = contour.to_matrix(include_k=False)\n\n # Turn the contour closed if it is not.\n if (C[0] != C[-1]).any():\n C = np.append(C, C[0].reshape(1,2), axis=0)\n\n # Create path object and test all pixels\n # within the contour's bounding box.\n path = mplpath.Path(C, closed=True)\n contains_pts = path.contains_points(test_points)\n contains_pts = contains_pts.reshape(mask.shape[:2])\n\n # The logical or here prevents the cases where a single\n # slice contains multiple inclusion regions.\n mask[:,:,zi] = np.logical_or(mask[:,:,zi], contains_pts)\n\n if not include_contour_points:\n # Remove the contour points themselves.\n i, j = (C - bb[:2,0]).T\n k = np.ones(C.shape[0], dtype=np.int)*zi\n mask[i,j,k] = False\n\n # Second, we \"turn off\" pixels enclosed by exclusion contours.\n for contour in self.contours:\n if not contour.inclusion:\n zi = z_to_index(contour.image_z_position)\n C = contour.to_matrix(include_k=False)\n\n # Turn the contour closed if it is not.\n if (C[0] != C[-1]).any():\n C = np.append(C, C[0].reshape(1,2), axis=0)\n\n path = mplpath.Path(C, closed=True)\n not_contains_pts = ~path.contains_points(test_points)\n not_contains_pts = not_contains_pts.reshape(mask.shape[:2])\n mask[:,:,zi] = np.logical_and(mask[:,:,zi], not_contains_pts)\n\n # Remove the contour points themselves.\n i, j = (C - bb[:2,0]).T\n k = np.ones(C.shape[0], dtype=np.int)*zi\n mask[i,j,k] = False\n\n return mask\n\n def _as_set(self):\n \"\"\"\n Private function used to computed overlap between nodules of the \n same scan. This function returns a set where is element is a \n 3-tuple referring to a voxel within the scan. 
If the voxel is \n in the set, the nodule is considered to be defined there.\n \n Essentially this is a boolean mask stored as a set.\n \"\"\"\n included = set()\n excluded = set()\n # Add all points lying within each inclusion contour to S.\n for contour in self.contours:\n contour_matrix = contour.to_matrix()[:,:2]\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n # Create path object and test all pixels \n # within the contour's bounding box.\n path = mplpath.Path(contour_matrix, closed=True)\n mn = contour_matrix.min(axis=0)\n mx = contour_matrix.max(axis=0)\n x,y = np.mgrid[mn[0]:mx[0]+1, mn[1]:mx[1]+1]\n test_points = np.c_[x.flatten(), y.flatten()]\n points_in_contour = test_points[path.contains_points(test_points)]\n\n # Add the z coordinate.\n points_in_contour = np.c_[\\\n points_in_contour,\\\n np.ones(points_in_contour.shape[0])*contour.image_z_position\n ]\n\n # Now turn the numpy matrix into a list of tuples,\n # so we can add it to the corresponding set.\n points_in_contour = list(map(tuple, points_in_contour))\n\n # Update the corresponding set.\n if contour.inclusion:\n included.update(points_in_contour)\n else:\n excluded.update(points_in_contour)\n # Return the included points minus the excluded points.\n return included.difference( excluded )\n\n def uniform_cubic_resample(self, side_length=None, resample_vol=True,\n irp_pts=None, return_irp_pts=False,\n resample_img=True, verbose=True):\n \"\"\"\n Get the CT value volume and respective boolean mask volume. The \n volumes are interpolated and resampled to have uniform spacing of 1mm\n along each dimension. The resulting volumes are cubic of the \n specified `side_length`. Thus, the returned volumes have dimensions,\n `(side_length+1,)*3` (since `side_length` is the spacing).\n\n TODO\n ----\n It would be nice if this function performed fully general \n interpolation, i.e., not necessarily uniform spacing and allowing \n different resample-resolutions along different coordinate axes.\n\n Parameters\n ----------\n side_length: integer, default=None\n The physical length of each side of the new cubic \n volume in millimeters. The default, `None`, takes the\n max of the nodule's bounding box dimensions.\n\n If this parameter is not `None`, then it should be \n greater than any bounding box dimension. If the specified \n `side_length` requires a padding which results in an \n out-of-bounds image index, then the image is padded with \n the minimum CT image value.\n\n resample_vol: boolean, default=True\n If False, only the segmentation volume is resampled.\n\n irp_pts: 3-tuple from meshgrid\n If provided, the volume(s) will be resampled over these interpolation\n points, rather than the automatically calculated points. This allows\n for sampling segmentation volumes over a common coordinate-system.\n\n return_irp_pts: boolean, default=False\n If True, the interpolation points (ix,iy,iz) at which the volume(s)\n were resampled are returned. These can potentially be provided as\n an argument to `irp_pts` for separate selfotations that refer to the\n same nodule, allowing the segmentation volumes to be resampled in a\n common coordinate-system.\n\n verbose: boolean, default=True\n Turn the loading statement on / off.\n\n Return\n ------\n [ct_volume,] mask [, irp_pts]: ndarray, ndarray, list of ndarrays\n `ct_volume` and `mask` are the resampled CT and boolean \n volumes, respectively. 
`ct_volume` and `irp_points` are optionally\n returned, depending on which flags are set (see above).\n\n Example\n -------\n An example::\n\n import numpy as np\n import matplotlib.pyplot as plt\n import pylidc as pl\n\n ann = pl.query(pl.Annotation).first()\n\n # resampled volumes will have uniform side length of 70mm and\n # uniform voxel spacing of 1mm.\n n = 70\n vol,mask = ann.uniform_cubic_resample(n)\n\n\n # Setup the plot.\n img = plt.imshow(np.zeros((n+1, n+1)), \n vmin=vol.min(), vmax=vol.max(),\n cmap=plt.cm.gray)\n\n\n # View all the resampled image volume slices.\n for i in range(n+1):\n img.set_data(vol[:,:,i] * (mask[:,:,i]*0.6+0.2))\n\n plt.title(\"%02d / %02d\" % (i+1, n))\n plt.pause(0.1)\n\n \"\"\"\n bbox = self.bbox_matrix()\n bboxd = self.bbox_dims()\n rij = self.scan.pixel_spacing\n rk = self.scan.slice_spacing\n\n imin,imax = bbox[0]\n jmin,jmax = bbox[1]\n kmin,kmax = bbox[2]\n\n xmin,xmax = imin*rij, imax*rij\n ymin,ymax = jmin*rij, jmax*rij\n\n zmin = self.scan.slice_zvals[kmin]\n zmax = self.scan.slice_zvals[kmax]\n\n # { Begin input checks.\n if side_length is None:\n side_length = np.ceil(bboxd.max())\n else:\n if not isinstance(side_length, int):\n raise TypeError('`side_length` must be an integer.')\n if side_length < bboxd.max():\n raise ValueError('`side_length` must be greater\\\n than any bounding box dimension.')\n side_length = float(side_length)\n # } End input checks.\n\n # Load the images. Get the z positions.\n images = self.scan.load_all_dicom_images(verbose=verbose)\n img_zs = [float(img.ImagePositionPatient[-1]) for img in images]\n img_zs = np.unique(img_zs)\n\n # Get the z values of the contours.\n contour_zs = np.unique([c.image_z_position for c in self.contours])\n\n # Get the indices where the nodule stops and starts\n # with respect to the scan z values.\n #kmin = np.where(zmin == img_zs)[0][0]\n #kmax = np.where(zmax == img_zs)[0][0]\n\n # Initialize the boolean mask.\n mask = self.boolean_mask()\n\n ########################################################\n # { Begin interpolation grid creation.\n # (The points at which the volumes will be resampled.)\n\n # Compute new interpolation grid points in x.\n d = 0.5*(side_length-(xmax - xmin))\n xhat, step = np.linspace(xmin-d, xmax+d,\n int(side_length)+1, retstep=True)\n assert abs(step-1) < 1e-5, \"New x spacing != 1.\"\n\n # Do the same for y.\n d = 0.5*(side_length-(ymax - ymin))\n yhat, step = np.linspace(ymin-d, ymax+d,\n int(side_length)+1, retstep=True)\n assert abs(step-1) < 1e-5, \"New y spacing != 1.\"\n\n # Do the same for z.\n d = 0.5*(side_length-(zmax - zmin))\n zhat, step = np.linspace(zmin-d, zmax+d,\n int(side_length)+1, retstep=True)\n assert abs(step-1) < 1e-5, \"New z pixel spacing != 1.\"\n\n # } End interpolation grid creation.\n ########################################################\n\n ########################################################\n # { Begin grid creation.\n # (The points at which the volumes are assumed to be sampled.)\n\n # a[x|y|z], b[x|y|z] are the start / stop indexes for the \n # (non resample) sample grids along each respective axis.\n\n # It helps to draw a diagram. For example,\n #\n # *--*--*-- ...\n # x3 x4 x5\n # *---*---*--- ...\n # xhat0\n #\n # In this case, `ax` would be chosen to be 3\n # since this is the index directly to the left of \n # `xhat[0]`. If `xhat[0]` is below any grid point,\n # then `ax` is the minimum possible index, 0. 
A similar\n # diagram helps with the `bx` index.\n\n T = np.arange(0, 512)*rij\n\n if xhat[0] <= 0:\n ax = 0\n else:\n ax = (T < xhat[0]).sum() - 1\n if xhat[-1] >= T[-1]:\n bx = 512\n else:\n bx = 512 - (T > xhat[-1]).sum() + 1\n\n if yhat[0] <= 0:\n ay = 0\n else:\n ay = (T < yhat[0]).sum() - 1\n if yhat[-1] >= T[-1]:\n by = 512\n else:\n by = 512 - (T > yhat[-1]).sum() + 1\n\n if zhat[0] <= img_zs[0]:\n az = 0\n else:\n az = (img_zs < zhat[0]).sum() - 1\n if zhat[-1] >= img_zs[-1]:\n bz = len(img_zs)\n else:\n bz = len(img_zs) - (img_zs > zhat[-1]).sum() + 1\n \n # These are the actual grids.\n x = T[ax:bx]\n y = T[ay:by]\n z = img_zs[az:bz]\n\n # } End grid creation.\n ########################################################\n\n\n # Create the non-interpolated CT volume.\n if resample_vol:\n ctvol = np.zeros(x.shape+y.shape+z.shape, dtype=np.float64)\n for k in range(z.shape[0]):\n ctvol[:,:,k] = images[k+az].pixel_array[ax:bx, ay:by]\n\n # We currently only have the boolean mask volume on the domain\n # of the bounding box. Thus, we must \"place it\" in the appropriately\n # sized volume (i.e., `ctvol.shape`). This is accomplished by\n # padding `mask`.\n padvals = [(imin-ax, bx-1-imax), # The `b` terms have a `+1` offset\n (jmin-ay, by-1-jmax), # from being an index that is\n (kmin-az, bz-1-kmax)] # corrected with the `-1` here.\n mask = np.pad(mask, pad_width=padvals,\n mode='constant', constant_values=False)\n\n # Obtain minimum image value to use as const for interpolation.\n if resample_vol:\n fillval = min([img.pixel_array.min() for img in images])\n\n if irp_pts is None:\n ix,iy,iz = np.meshgrid(xhat, yhat, zhat, indexing='ij')\n else:\n ix,iy,iz = irp_pts\n IXYZ = np.c_[ix.flatten(), iy.flatten(), iz.flatten()]\n\n # Interpolate the nodule CT volume.\n if resample_vol:\n rgi = RegularGridInterpolator(points=(x, y, z), values=ctvol,\n bounds_error=False, fill_value=fillval)\n ictvol = rgi(IXYZ).reshape(ix.shape)\n\n # Interpolate the mask volume.\n rgi = RegularGridInterpolator(points=(x, y, z), values=mask,\n bounds_error=False, fill_value=False)\n imask = rgi(IXYZ).reshape(ix.shape) > 0\n\n if resample_vol:\n if return_irp_pts:\n return ictvol, imask, (ix,iy,iz)\n else:\n return ictvol, imask\n else:\n if return_irp_pts:\n return imask, (ix,iy,iz)\n else:\n return imask\n\n\n# Add the relationship to the Scan model.\nScan.annotations = relationship('Annotation',\n order_by=Annotation.id,\n back_populates='scan')\n"
] |
[
[
"numpy.linspace",
"numpy.round",
"numpy.roll",
"matplotlib.pyplot.tight_layout",
"numpy.pad",
"numpy.unique",
"numpy.arange",
"scipy.interpolate.RegularGridInterpolator",
"numpy.diff",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.path.Path",
"numpy.logical_or",
"numpy.meshgrid",
"numpy.logical_and",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.widgets.CheckButtons",
"numpy.indices",
"numpy.sort",
"numpy.ones",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.cm.cmap_d.keys"
]
] |
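The Annotation docstrings above already sketch the intended pylidc workflow; here are the documented calls collected in one place (this assumes pylidc is installed and configured to find the LIDC-IDRI DICOM data):

import pylidc as pl

ann = pl.query(pl.Annotation).first()

vol = ann.scan.to_volume()      # full CT volume of the parent scan
bbox = ann.bbox(pad=2)          # tuple of slices, padded by 2 voxels
mask = ann.boolean_mask(pad=2)  # boolean volume matching vol[bbox]

print(ann.Malignancy)           # semantic label, e.g. 'Highly Unlikely'
print(ann.diameter)             # greatest axial diameter in mm
print(vol[bbox][mask].mean())   # mean HU inside the nodule
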
TatianaPorras/MNIG_to_FMMSP
|
[
"1a62884237a4133f1417982d88f8a1421a972a08"
] |
[
"algorithms/DNEH_SMR.py"
] |
[
"from API.functions import c_range, makespan, PT, ct\n\ndef DNEH_SMR(Tn, U_s, Pn):\n \"\"\"\n Tn es la lista de números triangulares difusos del tiempo de producción del trabajo i en la unidad o máquina u, en secuencia de números naturales, 1, 2, 3, ... El prefijo 't' es por triangular difuso.\n\n U_s es el conjunto de máquinas o unidades de la etapa s.\n\n Pn es la ponderación de los números triangulares en secuencia de números naturales, 1, 2, 3, ...\n\n Esta función aplica el algoritmo DNEH_SMR.\n \"\"\"\n\n import math\n import numpy as np\n\n # L es la cantidad total de etapas del sistema de producción.\n L = len(U_s)\n\n # I es el conjunto de trabajos.\n I = range(len(Tn))\n\n # sec es la secuencia natural de trabajos: 1, 2, 3, ...\n sec = range(1, len(Tn) + 1)\n\n # S es el conjunto de etapas.\n S = range(L)\n\n # n_s es la cantidad de unidades o máquinas de la etapa s.\n n_s = [len(U_s[s]) for s in S]\n\n\n # Paso 1\n\n k = math.floor(L/2)\n\n\n # Paso 2\n\n # Ta es la lista de los tiempos de producción promedio del trabajo i en la etapa s.\n Ta = [1/n_s[s]*np.sum([[Tn[i][u] for i in I] for u in U_s[s]], axis = 0) for s in S]\n\n # reordenar Ta\n Ta = [[Ta[s][i] for s in S] for i in I]\n\n\n # Paso 3\n\n # T1 es la lista de los tiempos promedio de la primera mitad de etapas.\n T1 = [1/k*np.sum([Ta[i][l] for l in c_range(1, k)], axis = 0) for i in I]\n\n # T2 es la lista de los tiempos promedio de la segunda mitad de etapas.\n T2 = [1/(L - k)*np.sum([Ta[i][l] for l in c_range(k + 1, L)], axis = 0) for i in I]\n\n\n # Paso 4\n\n # P2 es la lista de tiempos ponderados de la segunda mitad de etapas.\n P2 = [(T2[i][0] + 2*T2[i][1] + T2[i][2])/4 for i in range(len(T2))]\n\n # dP2 es P2 en forma de diccionario, donde la llave del diccionario son los trabajos en orden natural (1, 2, 3, ...)\n dP2 = dict(zip(sec, P2))\n\n # dP2_ord es dP2 en orden ascendente de P2\n dP2_ord = dict(sorted(dP2.items(), key = lambda arg1: arg1[1]))\n\n # pi_re1 es la secuencia de trabajos en orden ascendente de P2\n pi_re1 = list(dP2_ord.keys())\n\n\n # Paso 5\n\n pi_re2 = []\n for j in I:\n if j % 2 == 0:\n pi_re2.append(pi_re1[0])\n pi_re1.remove(pi_re1[0])\n else:\n pi_re2.append(pi_re1[math.ceil(len(pi_re1)/2) - 1])\n pi_re1.remove(pi_re1[math.ceil(len(pi_re1)/2) - 1])\n\n\n # Paso 6\n\n pi_re3 = pi_re2.copy()\n\n\n # Paso 7\n\n Tss = [Tn for j in pi_re3]\n U_ss = [U_s for j in pi_re3]\n Ps = [Pn for j in pi_re3]\n ex = []\n for j in pi_re2:\n pi_re3s = ct(pi_re3, j, ex)\n Cmax = list(map(makespan, pi_re3s, Tss, U_ss, Ps))\n\n P_Cmax = PT(Cmax)\n j_min = np.argmin(P_Cmax)\n k = pi_re3.index(j)\n\n pi_re3[j_min], pi_re3[k] = pi_re3[k], pi_re3[j_min]\n ex.append(j_min)\n\n return pi_re3, Ta"
] |
[
[
"numpy.argmin",
"numpy.sum"
]
] |
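Step 4 of DNEH_SMR collapses each triangular fuzzy time (a, b, c) to a crisp value with the weighted mean (a + 2b + c)/4, then ranks jobs by it. That ranking step in isolation, with made-up times:

# Triangular fuzzy processing times (a, b, c) for three jobs.
T2 = [(4, 6, 9), (2, 3, 5), (5, 7, 8)]

# Weighted defuzzification used in Step 4: (a + 2b + c) / 4.
P2 = [(a + 2*b + c) / 4 for (a, b, c) in T2]
print(P2)  # [6.25, 3.25, 6.75]

# Jobs (numbered 1, 2, 3) sorted by ascending crisp time, as in Steps 4-5.
order = sorted(range(1, len(P2) + 1), key=lambda j: P2[j - 1])
print(order)  # [2, 1, 3]
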
SunDevilThor/Flight-Schools
|
[
"b7ae51b1a15894824bc9dc888ceccc1d78c74e5d"
] |
[
"Flight-Schools.py"
] |
[
"# Flight Schools - Webscraping Project from Upwork\n\n# Objective: Gather data on all of the Airplane Flight Schools in the USA from:\n# https://www.flightschoollist.com/airplane-flight-schools/\n\n# And to create a spreadsheet with the following data:\n\n# 1. Business Name\n# 2. Owner/Director/CEO First and Last Name\n# 3. Owner/Director/CEO Email (\"info@...\" or \"[email protected]\" not accepted)\n# 4. Owner/Director/CEO Contact Phone Number\n# 5. Business Website URL\n# 6. Business Address\n# 7. Business Phone Number\n\n# How many flight schools are operating in the USA?\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\nMASTER_LIST = []\n\nstates = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware', 'Florida', 'Georgia',\n 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland',\n 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'NewHampshire', 'NewJersey',\n 'NewMexico', 'NewYork', 'NorthCarolona', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'RhodeIsland', 'SouthCarolina',\n 'SouthDakota', 'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virgina', 'West Virginia', 'Wisconsin', 'Wyoming']\n\n\nflight_school_states_urls = []\nschool_urls = []\ncontact_details_list = []\nnum_schools_per_state = []\nall_school_urls_pages = []\n\n\ndef flight_state_urls():\n for state in states:\n flight_schools = f'https://www.flightschoollist.com/{state}-airplane-flight-schools/'\n r = s.get(flight_schools)\n print('Getting URL for state:', state, r.status_code)\n flight_school_states_urls.append(flight_schools) \n\n\ndef state_school_url_pages():\n for state in states: \n print('Getting school links for', state)\n state_url = f'https://www.flightschoollist.com/{state}-airplane-flight-schools/'\n base_url = 'https://www.flightschoollist.com'\n\n r = s.get(state_url)\n soup = BeautifulSoup(r.text, 'html.parser')\n\n # Checks the amount of schools for each state\n pagination = soup.find_all('ul', class_ = 'pagination pagination-sm pull-right')\n\n for item in pagination:\n partial_link = item.find('li').find('a')['href']\n num_schools = int(partial_link.split('=')[2])\n\n num_schools_per_state.append(num_schools)\n print(state, 'has', num_schools, 'schools.')\n\n page_number = str(num_schools)\n try:\n page_number = int(page_number) \n if page_number <= 10:\n page_number = 0\n elif page_number > 99 and page_number < 109:\n page_number = 10\n elif page_number > 109:\n page_number = 11\n elif page_number > 119:\n page_number = 12\n else:\n page_number = int(str(page_number)[0]) \n except:\n page_number = int(page_number)\n #print(page_number)\n\n for page in range(0, page_number+1): \n print('Getting links for', state, 'on page:', page)\n pages = state_url + f'?pageNum_rsSchoolLocation={page}&totalRows_rsSchoolLocation={num_schools}' \n all_school_urls_pages.append(pages)\n\n\ndef individual_schools(): \n base_url = 'https://www.flightschoollist.com'\n for url in all_school_urls_pages:\n r = s.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n\n data = soup.find_all('tbody')\n for item in data: \n sections = item.find_all('tr')\n for td in sections: \n listings = td.find_all('td')\n\n for item in listings[1]:\n link = base_url + item['href']\n\n school_urls.append(link)\n\n print('Amount of school URLs:', len(school_urls))\n\n\n\n\ndef school_info():\n for url in school_urls: # might need to change back to \"school_urls\" instead of 
\"all_school_urls\"\n print('Gathering info from:', url)\n try: \n response = s.get(url)\n except Exception as error: \n print('BAD LINK')\n pass\n soup = BeautifulSoup(response.text, 'html.parser')\n\n contact_details = soup.find_all('ul', class_ = 'list list-icons list-icons-style-3 mt-xlg')\n\n for item in contact_details:\n information = item.text\n pieces = information.split(' ')\n\n try: \n school_name = item.find('a').text\n address = pieces[1].split(':')[1].strip()\n airport = pieces[2].split(':')[1].strip()\n phone_number = pieces[3].split(':')[1].strip()\n email = pieces[4].split(':')[1].strip()\n school_url = item.find('a')['href']\n\n except Exception as error:\n print(error, school_name, school_url)\n pass\n \n print('Getting contact details for:', school_name)\n\n details = {\n 'school_name': school_name, \n 'address': address, \n 'airport': airport, \n 'phone_number': phone_number, \n 'email': email, \n 'school_url': school_url,\n }\n\n contact_details_list.append(details)\n\n\ndef output():\n df = pd.DataFrame(contact_details_list)\n df.to_csv('Flight-Schools-Information.csv')\n print(df.head())\n print('Success. Items saved to CSV file.')\n\n\n\n\n# DONE: flight_state_urls function working good\n# DONE: state_school_urls function is working\n# DONE: school_info function is working\n\n\n\nif __name__ == '__main__':\n #pass\n s = requests.Session()\n flight_state_urls()\n state_school_url_pages()\n print('Amount of school URL pages:', len(all_school_urls_pages))\n individual_schools()\n school_info()\n print('Total amount of flight schools in the USA:', sum(num_schools_per_state))\n output()\n\n\n# Workflow: \n\n\n# TO-DO: \n# Fix some of the addresses that only have the beginning showing. \n# Add all schools names to a separate list - CSV file to be imported into another PY file for Selenium\n# Use Selenium to get the CEO of each flight-school. \n\n\n# Bugs: \n# \"Index error: list out of range\" for a few items in \"details\" dictionary in school_info function\n\n# Total amount of flight schools in the USA: 867\n\n# Pagination: \n# https://www.flightschoollist.com/alabama-airplane-flight-schools/?pageNum_rsSchoolLocation=0&totalRows_rsSchoolLocation=11\n# https://www.flightschoollist.com/alabama-airplane-flight-schools/?pageNum_rsSchoolLocation=1&totalRows_rsSchoolLocation=11\n\n\n"
] |
[
[
"pandas.DataFrame"
]
] |
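The scraper above discovers a per-state school count and then walks the site's pageNum_rsSchoolLocation/totalRows_rsSchoolLocation query strings. Just the URL construction, with no network calls and made-up counts:

def page_urls(state, num_schools, last_page):
    base = f'https://www.flightschoollist.com/{state}-airplane-flight-schools/'
    return [base + f'?pageNum_rsSchoolLocation={page}'
                   f'&totalRows_rsSchoolLocation={num_schools}'
            for page in range(last_page + 1)]

for url in page_urls('Alabama', 11, 1):
    print(url)
# ...?pageNum_rsSchoolLocation=0&totalRows_rsSchoolLocation=11
# ...?pageNum_rsSchoolLocation=1&totalRows_rsSchoolLocation=11
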
dickronez/autokeras
|
[
"b31f2cafe77bf3a2f738289a89438fb72936117c"
] |
[
"autokeras/utils.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\n\n\ndef get_global_average_pooling(shape):\n return [tf.keras.layers.GlobalAveragePooling1D,\n tf.keras.layers.GlobalAveragePooling2D,\n tf.keras.layers.GlobalAveragePooling3D][len(shape) - 3]\n\n\ndef get_global_max_pooling(shape):\n return [tf.keras.layers.GlobalMaxPool1D,\n tf.keras.layers.GlobalMaxPool2D,\n tf.keras.layers.GlobalMaxPool3D][len(shape) - 3]\n\n\ndef get_max_pooling(shape):\n return [tf.keras.layers.MaxPool1D,\n tf.keras.layers.MaxPool2D,\n tf.keras.layers.MaxPool3D][len(shape) - 3]\n\n\ndef get_conv(shape):\n return [tf.keras.layers.Conv1D,\n tf.keras.layers.Conv2D,\n tf.keras.layers.Conv3D][len(shape) - 3]\n\n\ndef get_sep_conv(shape):\n return [tf.keras.layers.SeparableConv1D,\n tf.keras.layers.SeparableConv2D,\n tf.keras.layers.Conv3D][len(shape) - 3]\n\n\ndef get_dropout(shape):\n return [tf.keras.layers.SpatialDropout1D,\n tf.keras.layers.SpatialDropout2D,\n tf.keras.layers.SpatialDropout3D][len(shape) - 3]\n\n\ndef validate_num_inputs(inputs, num):\n inputs = nest.flatten(inputs)\n if not len(inputs) == num:\n raise ValueError('Expected {num} elements in the inputs list '\n 'but received {len} inputs.'.format(num=num,\n len=len(inputs)))\n\n\ndef split_train_to_valid(x, y, validation_split):\n # Generate split index\n validation_set_size = int(len(x[0]) * validation_split)\n validation_set_size = max(validation_set_size, 1)\n validation_set_size = min(validation_set_size, len(x[0]) - 1)\n\n # Split the data\n x_train = []\n y_train = []\n x_val = []\n y_val = []\n for temp_x in x:\n x_train.append(temp_x[:-validation_set_size])\n x_val.append(temp_x[-validation_set_size:])\n for temp_y in y:\n y_train.append(temp_y[:-validation_set_size])\n y_val.append(temp_y[-validation_set_size:])\n\n return (x_train, y_train), (x_val, y_val)\n\n\ndef get_name_scope():\n with tf.name_scope('a') as scope:\n name_scope = scope[:-2]\n return name_scope\n\n\ndef dataset_shape(dataset):\n return tf.compat.v1.data.get_output_shapes(dataset)\n\n\ndef inputs_to_datasets(x):\n x = nest.flatten(x)\n new_x = []\n for temp_x in x:\n if isinstance(temp_x, np.ndarray):\n new_x.append(tf.data.Dataset.from_tensor_slices(temp_x))\n return tf.data.Dataset.zip(tuple(new_x))\n\n\ndef prepare_preprocess(x, y):\n \"\"\"Convert each input to a tf.data.Dataset.\"\"\"\n x = inputs_to_datasets(x)\n y = inputs_to_datasets(y)\n return tf.data.Dataset.zip((x, y))\n\n\ndef is_label(y):\n \"\"\"Check if the targets are one-hot encoded or plain labels.\n\n Args:\n y: numpy.ndarray. The targets.\n\n Returns:\n Boolean. Whether the targets are plain label, not encoded.\n \"\"\"\n return len(y.flatten()) == len(y) and len(set(y.flatten())) > 2\n"
] |
[
[
"tensorflow.compat.v1.data.get_output_shapes",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.data.Dataset.zip",
"tensorflow.name_scope",
"tensorflow.python.util.nest.flatten"
]
] |
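Every helper in the autokeras file above uses the same dispatch idiom: index a list of 1D/2D/3D layer classes with len(shape) - 3, where shape includes the batch and channel axes. The idiom in isolation, with strings standing in for the Keras classes:

def pick(shape, variants):
    # rank 3 -> 1D variant, rank 4 -> 2D, rank 5 -> 3D
    return variants[len(shape) - 3]

convs = ['Conv1D', 'Conv2D', 'Conv3D']
print(pick((None, 128, 3), convs))         # Conv1D: (batch, steps, channels)
print(pick((None, 32, 32, 3), convs))      # Conv2D: (batch, h, w, channels)
print(pick((None, 16, 16, 16, 1), convs))  # Conv3D: (batch, d, h, w, channels)
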
NovakVed/graph-drawing-algorithm
|
[
"9a73b3b7b6afe0f6222994166fe01c1e49698b60"
] |
[
"Projekt.py"
] |
[
"import grandalf\nfrom grandalf.layouts import SugiyamaLayout\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport csv\n\nG = nx.DiGraph() # Build your networkx graph here\n\nfilename = input(\"Napiši naziv datoteke koje želiš dodati: \")\nwith open(f\"{filename}.csv\") as file:\n reader = csv.reader(file)\n\n for row in reader:\n G.add_edge(row[0], row[1])\n\ng = grandalf.utils.convert_nextworkx_graph_to_grandalf(G) # undocumented function\n\nclass defaultview(object):\n w,h = 10,10\n\nfor v in g.C[0].sV: v.view = defaultview()\n\nsug = SugiyamaLayout(g.C[0])\nsug.init_all() # roots=[V[0]])\nsug.draw() # This is a bit of a misnomer, as grandalf doesn't actually come with any visualization methods. This method instead calculates positions\n\nposes = {v.data: (v.view.xy[0], v.view.xy[1]) for v in g.C[0].sV} # Extracts the positions\n\nnodesWithPosition = list(poses.values())\nnodesWithPosition.sort(key=lambda x: x[1], reverse=True)\n\ncounter = -1\ndistance = 30\npreviousY = nodesWithPosition[0][1]\noffsetedNodes = []\nfor node in nodesWithPosition:\n currentY = node[1]\n if currentY == previousY:\n counter += 1\n else:\n distance = 30\n counter = 0\n previousY = currentY\n offsetedNodes.append((node[0], node[1]+distance))\n continue\n \n if counter % 3 == 0:\n distance /= 2\n \n offsetedNodes.append((node[0], node[1]+distance))\n # node[1] += distance\n# for pos in poses:\n\noffsetedPoses = {}\n\n\nfor vertex, position in poses.items():\n for node in offsetedNodes:\n if node[0] == position[0]:\n offsetedPoses[vertex] = node\n break\n offsetedPoses[vertex] = position\n\nnx.draw(G, pos=offsetedPoses, with_labels=True)\nimport matplotlib.pyplot as plt\nplt.show()"
] |
[
[
"matplotlib.pyplot.show"
]
] |
makaveli10/cvu
|
[
"50b65cc0a6caa9e636804d3ce10a3ee611b44cd8",
"50b65cc0a6caa9e636804d3ce10a3ee611b44cd8"
] |
[
"cvu/preprocess/image/letterbox.py",
"cvu/detector/yolov5/backends/yolov5_tensorrt.py"
] |
[
"\"\"\"Original Code Taken From ultralytics/yolov5\nURL: https://github.com/ultralytics/yolov5/blob/master/utils/datasets.py\n\"\"\"\nfrom typing import Tuple\n\nimport numpy as np\nimport cv2\n\n\ndef letterbox(img: np.ndarray,\n new_shape: Tuple[int] = (640, 640),\n color: Tuple[int] = (114, 114, 114),\n auto: bool = True,\n scale_fill: bool = False,\n scaleup: bool = True,\n stride: bool = 32) -> np.ndarray:\n \"\"\"Reshape image without affecting the aspect ratio by adding minimum\n letter box type borders, and fill the border area with gray or the\n specified color. Resize and pad image while meeting stride-multiple constraints\n\n Args:\n img (np.ndarray): original image\n new_shape (Tuple[int], optional): shape of output image. Defaults to (640, 640).\n color (Tuple[int], optional): color to be filled in borders. Defaults to (114, 114, 114).\n auto (bool, optional): pick minimum rectangle . Defaults to True.\n scale_fill (bool, optional): strech. Defaults to False.\n scaleup (bool, optional): scale up if needed. Defaults to True.\n stride (bool, optional): used for auto. Defaults to 32.\n\n Returns:\n np.ndarray: resulting image\n \"\"\"\n\n # current shape [height, width]\n shape = img.shape[:2]\n\n # new shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n scale_ratio = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n\n # only scale down, do not scale up (for better test mAP)\n if not scaleup:\n scale_ratio = min(scale_ratio, 1.0)\n\n # Compute padding\n # ratio = r, r # width, height ratios\n new_unpad = (int(round(shape[1] * scale_ratio)),\n int(round(shape[0] * scale_ratio)))\n\n # wh padding\n delta_width = new_shape[1] - new_unpad[0]\n delta_height = new_shape[0] - new_unpad[1]\n\n # minimum rectangle\n if auto:\n # update wh padding\n delta_width = np.mod(delta_width, stride)\n delta_height = np.mod(delta_height, stride)\n\n # stretch\n elif scale_fill:\n delta_width, delta_height = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n\n # width, height ratios\n # ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]\n\n # divide padding into 2 sides\n delta_width /= 2\n delta_height /= 2\n\n # resize\n if shape[::-1] != new_unpad:\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n\n # add border\n img = cv2.copyMakeBorder(img,\n int(round(delta_height - 0.1)),\n int(round(delta_height + 0.1)),\n int(round(delta_width - 0.1)),\n int(round(delta_width + 0.1)),\n cv2.BORDER_CONSTANT,\n value=color)\n return img\n",
"\"\"\"This file contains Yolov5's IModel implementation in TensorRT.\nThis model (tensorRT-backend) performs inference using TensorRT,\non a given input numpy array, and returns result after performing\nnms and other backend specific postprocessings.\n\nModel expects normalized inputs (data-format=channels-first) with\nbatch axis. Model does not apply letterboxing to given inputs.\n\"\"\"\nimport os\nfrom typing import Tuple, List\n\nimport numpy as np\nimport tensorrt as trt\nimport pycuda.autoinit # noqa # pylint: disable=unused-import\nimport pycuda.driver as cuda\n\nfrom cvu.interface.model import IModel\nfrom cvu.utils.general import get_path\nfrom cvu.detector.yolov5.backends.common import download_weights\nfrom cvu.postprocess.nms.yolov5 import non_max_suppression_np\n\n\nclass Yolov5(IModel):\n \"\"\"Implements IModel for Yolov5 using TensorRT.\n\n This model (tensorrt-backend) performs inference, using TensorRT,\n on a numpy array, and returns result after performing NMS.\n\n This model does not support runtime dynamic inputs. In other words, once\n created and first inference is done, model expects rest of the inputs to\n be of the same shape as the first input (or the input shape given at the\n initialization step).\n\n Inputs are expected to be normalized in channels-first order\n with/without batch axis.\n \"\"\"\n def __init__(self,\n weight: str = None,\n num_classes: int = 80,\n input_shape=None,\n fp16: bool = True) -> None:\n\n # Create a Context on this device,\n self._ctx = cuda.Device(0).make_context()\n self._logger = trt.Logger(trt.Logger.INFO)\n self._stream = cuda.Stream()\n\n # initiate basic class attributes\n self._weight = weight\n self._fp16 = fp16\n\n # initiate model specific class attributes\n self._nc = num_classes\n self._input_shape = input_shape\n\n # initiate engine related class attributes\n self._engine = None\n self._context = None\n self._inputs = None\n self._outputs = None\n self._bindings = None\n\n # initiate engine if input_shape given\n if self._input_shape is not None:\n self._load_model(weight)\n self._allocate_buffers()\n\n def _deserialize_engine(self,\n trt_engine_path: str) -> trt.tensorrt.ICudaEngine:\n \"\"\"Deserialize TensorRT Cuda Engine\n\n Args:\n trt_engine_path (str): path to engine file\n\n Returns:\n trt.tensorrt.ICudaEngine: deserialized engine\n \"\"\"\n with open(trt_engine_path, 'rb') as engine_file:\n with trt.Runtime(self._logger) as runtime:\n engine = runtime.deserialize_cuda_engine(engine_file.read())\n\n return engine\n\n def _load_model(self, weight: str) -> None:\n \"\"\"Internally loads TensorRT cuda engine and creates execution context.\n\n Args:\n weight (str): path to ONNX weight file, TensorRT Engine file or\n predefined-identifiers (such as yolvo5s, yolov5m, etc.)\n \"\"\"\n # load default models using predefined-identifiers\n if \".\" not in weight:\n height, width = self._input_shape[:2]\n\n # get path to pretrained weights\n engine_path = get_path(__file__, \"weights\",\n f\"{weight}_{height}_{width}_trt.engine\")\n\n onnx_weight = get_path(__file__, \"weights\", f\"{weight}_trt.onnx\")\n\n # download onnx weights if needed, and/or generate engine file\n if not os.path.exists(engine_path):\n\n # download weights if not already downloaded\n download_weights(onnx_weight, \"tensorrt\")\n\n # build engine with current configs and load it\n self._engine = self._build_engine(onnx_weight, engine_path,\n self._input_shape)\n else:\n # deserialize and load engine\n self._engine = 
self._deserialize_engine(engine_path)\n\n        # use custom models\n        else:\n            # get path to weights\n            engine_path = weight.replace(\n                \"onnx\", \"engine\") if \".onnx\" in weight else weight\n\n            # build engine with given configs and load it\n            if not os.path.exists(engine_path):\n                self._engine = self._build_engine(weight, engine_path,\n                                                  self._input_shape)\n            else:\n                # deserialize and load engine\n                self._engine = self._deserialize_engine(engine_path)\n\n        # check if engine loaded properly\n        if not self._engine:\n            raise Exception(\"[CVU-Error] Couldn't build engine successfully!\")\n\n        # create execution context\n        self._context = self._engine.create_execution_context()\n        if not self._context:\n            raise Exception(\n                \"[CVU-Error] Couldn't create execution context from engine successfully!\"\n            )\n\n    def _build_engine(self, onnx_weight: str, trt_engine_path: str,\n                      input_shape: Tuple[int]) -> trt.tensorrt.ICudaEngine:\n        \"\"\"Builds and serializes TensorRT engine by parsing the onnx model.\n\n        Args:\n            onnx_weight (str): path to onnx weight file\n            trt_engine_path (str): path where serialized engine file will be saved\n            input_shape (Tuple[int]): input shape for network\n\n        Raises:\n            FileNotFoundError: raised if onnx weight file doesn't exist\n            TypeError: raised if invalid type of weight file is given\n\n        Returns:\n            trt.tensorrt.ICudaEngine: built engine\n        \"\"\"\n\n        # checks if onnx path exists\n        if not os.path.exists(onnx_weight):\n            raise FileNotFoundError(\n                f\"[CVU-Error] {onnx_weight} does not exist.\")\n\n        # check if valid onnx_weight\n        if \".onnx\" not in onnx_weight:\n            raise TypeError(\n                f\"[CVU-Error] Expected onnx weight file, instead {onnx_weight} is given.\"\n            )\n\n        # Specify that the network should be created with an explicit batch dimension.\n        batch_size = 1 << (int)(\n            trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)\n\n        # build and serialize engine\n        with trt.Builder(self._logger) as builder, \\\n                builder.create_network(batch_size) as network, \\\n                trt.OnnxParser(network, self._logger) as parser:\n\n            # setup builder config\n            config = builder.create_builder_config()\n            config.max_workspace_size = 64 * 1 << 20  # 64 MB\n            builder.max_batch_size = 1\n\n            # FP16 quantization\n            if self._fp16:\n                if builder.platform_has_fast_fp16:\n                    print(\"[CVU-Info] Platform has FP16 support.\",\n                          \"Setting fp16 to True\")\n                    config.flags = 1 << (int)(trt.BuilderFlag.FP16)\n\n            # parse onnx model\n            with open(onnx_weight, 'rb') as onnx_file:\n                if not parser.parse(onnx_file.read()):\n                    for error in range(parser.num_errors):\n                        print(parser.get_error(error))\n\n            # set input shape\n            network.get_input(0).shape = (1, 3, *input_shape)\n\n            # build engine\n            engine = builder.build_engine(network, config)\n            with open(trt_engine_path, 'wb') as trt_engine_file:\n                trt_engine_file.write(engine.serialize())\n            print(\"[CVU-Info] Engine serialized and saved!\")\n            return engine\n\n    def _allocate_buffers(self) -> None:\n        \"\"\"Allocates memory for inference using TensorRT engine.\n        \"\"\"\n        inputs, outputs, bindings = [], [], []\n        for binding in self._engine:\n            size = trt.volume(self._engine.get_binding_shape(binding))\n            dtype = trt.nptype(self._engine.get_binding_dtype(binding))\n            host_mem = cuda.pagelocked_empty(size, dtype)\n            device_mem = cuda.mem_alloc(host_mem.nbytes)\n            bindings.append(int(device_mem))\n            if self._engine.binding_is_input(binding):\n                inputs.append({'host': host_mem, 'device': device_mem})\n            else:\n                outputs.append({'host': host_mem, 'device': device_mem})\n\n        # set buffers\n        self._inputs = inputs\n        self._outputs = 
outputs\n        self._bindings = bindings\n\n    def __call__(self, inputs: np.ndarray) -> np.ndarray:\n        \"\"\"Performs model inference on given inputs, and returns\n        inference's output after NMS.\n\n        Args:\n            inputs (np.ndarray): normalized in channels-first format,\n            with/without batch axis.\n\n        Raises:\n            Exception: raised if the input's shape doesn't match the\n            expected input shape.\n\n        Returns:\n            np.ndarray: inference's output after NMS\n        \"\"\"\n        # set input shape and build engine if first inference\n        if self._input_shape is None:\n            self._input_shape = inputs.shape[-2:]\n            print(\"[CVU-Info] Building and Optimizing TRT-Engine\",\n                  f\"for input_shape={self._input_shape}.\",\n                  \"This might take a few minutes for first time.\")\n            self._load_model(self._weight)\n            self._allocate_buffers()\n\n        # check if inputs shape match expected shape\n        if inputs.shape[-2:] != self._input_shape:\n            raise Exception(\n                (\"[CVU-Error] Invalid Input Shapes: Expected input to \" +\n                 f\"be of shape {self._input_shape}, but got \" +\n                 f\"input of shape {inputs.shape[-2:]}. \" +\n                 \"Please rebuild TRT Engine with correct shapes.\"))\n\n        # perform inference and postprocess\n        outputs = self._inference(inputs)\n        preds = self._post_process(outputs)\n        return preds[0]\n\n    def _inference(self, inputs: np.ndarray) -> List[np.ndarray]:\n        \"\"\"Runs inference on the given inputs.\n\n        Args:\n            inputs (np.ndarray): channels-first format,\n            with/without batch axis\n\n        Returns:\n            List[np.ndarray]: inference's output (raw tensorrt output)\n        \"\"\"\n        self._ctx.push()\n\n        # copy inputs to input memory\n        # without astype gives invalid arg error\n        self._inputs[0]['host'] = np.ravel(inputs).astype(np.float32)\n\n        # transfer data to the gpu\n        for inp in self._inputs:\n            cuda.memcpy_htod_async(inp['device'], inp['host'], self._stream)\n\n        # run inference\n        self._context.execute_async_v2(bindings=self._bindings,\n                                       stream_handle=self._stream.handle)\n\n        # fetch outputs from gpu\n        for out in self._outputs:\n            cuda.memcpy_dtoh_async(out['host'], out['device'], self._stream)\n\n        # synchronize stream\n        self._stream.synchronize()\n        self._ctx.pop()\n        return [out['host'] for out in self._outputs]\n\n    def _post_process(self, outputs: List[np.ndarray]) -> List[np.ndarray]:\n        \"\"\"Post-process outputs from model inference.\n\n        Transforms tensorrt output into boxes, confs, labels and\n        applies non max suppression.\n\n        Args:\n            outputs (List[np.ndarray]): raw tensorrt output tensor\n\n        Returns:\n            List[np.ndarray]: post-processed output after nms\n        \"\"\"\n        # reshape into expected output shape\n        outputs = outputs[-1].reshape((1, -1, self._nc + 5))\n        return non_max_suppression_np(outputs)\n\n    def __repr__(self) -> str:\n        \"\"\"Returns Model Information\n\n        Returns:\n            str: information string\n        \"\"\"\n        return f\"Yolov5s TensorRT-Cuda-{self._input_shape}\"\n\n    def __del__(self):\n        \"\"\"Clean up execution context stack.\n        \"\"\"\n        try:\n            self._ctx.pop()\n        except pycuda.driver.LogicError as _:\n            print(\"[CVU-Info] Context stack is already empty.\")\n"
] |
[
[
"numpy.mod"
],
[
"numpy.ravel"
]
] |
maindolaamit/mllib
|
[
"03f46089b1d6661dcc3245118b7d108d124e645c",
"03f46089b1d6661dcc3245118b7d108d124e645c"
] |
[
"cnn/model_builder.py",
"utils/charts.py"
] |
[
"import importlib\nimport os\nfrom pathlib import Path\n\nimport pandas as pd\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom livelossplot.inputs.keras import PlotLossesCallback\nfrom sklearn.model_selection import KFold\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.layers import Conv2D, MaxPool2D, Flatten\nfrom tensorflow.python.keras.models import Sequential\n\nMODELS_FILE_DIR = Path(__file__).resolve().parent\nMODELS_JSON_FILE_PATH = os.path.join(MODELS_FILE_DIR, 'models.json')\n\n\ndef get_vgg_block(num_blocks=1, input_shape=(150, 150, 3),\n padding='same'):\n assert 4 > num_blocks > 0, f'Number of blocks should be in range 1 and 3'\n model = Sequential()\n dropout_list = [.2, .2, .3, .3]\n filter_list = [32, 64, 128, 128]\n\n def dropout_value(index):\n return dropout_list[index] if index <= len(dropout_list) else dropout_list[-1]\n\n def filter_value(index):\n return filter_list[index] if index <= len(filter_list) else filter_list[-1]\n\n for i in range(num_blocks):\n if i == 0:\n model.add(Conv2D(filters=filter_value(i), kernel_size=3,\n activation='relu', input_shape=input_shape,\n padding=padding)\n )\n else:\n model.add(Conv2D(filters=filter_value(i),\n kernel_size=3, activation='relu', padding=padding))\n # model.add(Conv2D(filters=filter_value(i), kernel_size=3,\n # activation='relu', padding=padding))\n model.add(MaxPool2D(pool_size=2, strides=2))\n model.add(Dropout(dropout_value(i)))\n\n model.add(Flatten())\n\n return model\n\n\nclass CNNModel:\n def __init__(self, model_name, weights='imagenet', input_shape=(224, 224, 3),\n optimizer=Adam(), loss='categorical_crossentropy', metrics=None):\n \"\"\"\n Constructor method\n :param model_name: Base model name\n :param weights: Weights of the model, initialized to imagenet\n \"\"\"\n if metrics is None:\n metrics = ['accuracy']\n self.model_name = model_name\n self.metrics = metrics\n self.weights = weights\n self.input_shape = input_shape\n self.model = None\n self.loss = loss\n self.optimizer = optimizer\n self.preprocessing_function = None\n\n def _get_base_module(self, model_name):\n \"\"\"\n Get the base model based on the base model name\n :param model_name: Base model name\n :return: Base models' library\n \"\"\"\n import json\n with open(MODELS_JSON_FILE_PATH) as model_json_file:\n models = json.load(model_json_file)\n if model_name not in models.keys():\n raise Exception(f\"Invalid model name, should have one of the value {models.keys()}\")\n self.base_model_name = models[model_name]['model_name']\n model_package = models[model_name]['model_package']\n print(f\"{model_package}.{self.base_model_name}\")\n self.base_module = importlib.import_module(model_package)\n\n def build(self):\n \"\"\"\n Build the CNN model for Neural Image Assessment\n \"\"\"\n # Load pre trained model\n base_cnn = getattr(self.base_module, self.base_model_name)\n self.preprocessing_function = getattr(self.base_module, 'preprocess_input')\n self.model_name = base_cnn(input_shape=self.input_shape, weights=self.weights,\n pooling='avg', include_top=False)\n return self.model\n\n def compile(self):\n \"\"\"\n Compile the Model\n \"\"\"\n self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics)\n\n def summary(self):\n self.model.summary()\n\n def get_preprocess_input(self):\n return self.preprocessing_function\n\n def 
train_model_from_dataframe(self, df, img_directory, model, x_col, y_col, monitor='val_accuracy',\n                               weight_prefix=None, weights_dir=None, class_mode='categorical',\n                               batch_size=32, epochs=25, verbose=0):\n        # Assign current directory if no directory passed to save weights\n        if weights_dir is None:\n            weights_dir = os.path.join(os.getcwd(), 'weights')\n            # create directory if it does not exist\n            if not os.path.isdir(weights_dir):\n                os.mkdir(weights_dir)\n        else:\n            # assert on the condition itself; asserting a (condition, message) tuple is always truthy\n            assert os.path.isdir(weights_dir), 'Invalid directory ' + weights_dir\n\n        train_result_df = []\n        target_size = (self.input_shape[0], self.input_shape[1])\n        # Take a 5 fold cross validation\n        cv = KFold(n_splits=5, shuffle=True, random_state=1024)\n        fold = 1\n\n        # Loop for each fold\n        for train_index, val_index in cv.split(df[x_col]):\n            train_df, val_df = df.iloc[train_index], df.iloc[val_index]\n            # Define Generators; preprocessing_function belongs to ImageDataGenerator,\n            # not flow_from_dataframe (where it would be silently ignored)\n            train_datagen = ImageDataGenerator(rescale=1.0 / 255, horizontal_flip=True,\n                                               vertical_flip=True,\n                                               preprocessing_function=self.preprocessing_function)\n            train_gen = train_datagen.flow_from_dataframe(train_df, directory=img_directory,\n                                                          x_col=x_col, y_col=y_col,\n                                                          batch_size=batch_size, class_mode=class_mode,\n                                                          target_size=target_size)\n\n            valid_gen = train_datagen.flow_from_dataframe(val_df, directory=img_directory,\n                                                          x_col=x_col, y_col=y_col,\n                                                          batch_size=batch_size, class_mode=class_mode,\n                                                          target_size=target_size)\n\n            # compile model\n            model.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics)\n            # Define the callbacks\n            es = EarlyStopping(monitor=monitor, patience=4)\n\n            weight_prefix = weight_prefix if weight_prefix is not None else self.base_model_name\n            weight_filepath = os.path.join(weights_dir, f'{weight_prefix}_weight_best_fold_{fold}.hdf5')\n            print(f'\\tModel Weight file : {weight_filepath}')\n            mckpt = ModelCheckpoint(\n                filepath=weight_filepath,\n                save_weights_only=True,\n                monitor=monitor,\n                mode=\"max\",\n                save_best_only=True,\n            )\n            lr = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1)\n            plot_loss = PlotLossesCallback()\n\n            # start training\n            history = model.fit(train_gen, validation_data=valid_gen,\n                                epochs=epochs, callbacks=[es, mckpt, lr, plot_loss],\n                                verbose=verbose)\n            result_df = pd.DataFrame(history.history)\n            result_df['fold'] = fold\n            train_result_df.append(result_df)\n\n            fold += 1\n        return pd.concat(train_result_df)\n",
"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\n\n\ndef reset_rc():\n \"\"\"\n Reset the matplotlib rc parameters\n \"\"\"\n mpl.rcParams.update(mpl.rcParamsDefault)\n\n\ndef set_rc(titlesize=10):\n \"\"\"\n Set the RC parameters for matplotlib and set Seaborn settings\n \"\"\"\n pd.set_option('display.max_colwidth', 100)\n # sns.set(style=\"ticks\", color_codes=True)\n # plt.style.use('seaborn-whitegrid')\n sns.set_theme(style='whitegrid', color_codes=True, palette='Set2')\n\n # Set Matplotlib defaults\n plt.rc('figure', autolayout=True)\n plt.rc('axes', labelweight='bold', labelsize='large',\n titleweight='bold', titlesize=titlesize, titlepad=10)\n\n plt.rcParams['text.color'] = '#191c1b'\n purple_color = \"#b753e6\"\n _ = plt.tight_layout()\n # plt.rcParams['axes.labelcolor']= '#ffaa80'\n # plt.rcParams['xtick.color'] = '#e27caa'\n # plt.rcParams['ytick.color'] = '#799fec'\n # plt.rcParams['font.size']=12\n\n\ndef donut(df_col, title=None, figsize=None, titlesize=17):\n \"\"\"\n Create a Donut chart for the given Pandas Series\n :param figsize: Figure size\n :param title: Title of the figure\n :param df_col: Pandas Series\n \"\"\"\n df_count = df_col.apply(str).str.lower().value_counts()\n\n fig, ax = plt.subplots(figsize=figsize)\n colors = ['#66b3ff', '#99ff99', '#ffcc99']\n patches, text, autotext = ax.pie(df_count, labels=df_count.index, autopct='%1.1f%%',\n shadow=True, startangle=90)\n # draw circle\n centre_circle = plt.Circle((0, 0), 0.70, fc='white')\n fig = plt.gcf()\n fig.gca().add_artist(centre_circle)\n\n # Equal aspect ratio ensures that pie is drawn as a circle\n ax.axis('equal')\n if title is None:\n title = f'Distribution of {df_col.name.capitalize()}'\n plt.title(title, fontsize=titlesize)\n plt.tight_layout()\n plt.show()\n\n\ndef bi_donut(df_cols, sup_title):\n \"\"\"\n Creates two donut charts side by side for the passed DataFrame of two columns\n :param df_cols: DataFrame\n :param sup_title: Sup Title of the cart\n \"\"\"\n fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15, 6), constrained_layout=False)\n columns = df_cols.columns\n if len(columns) < 2:\n raise Exception(\"Number of columns less than 2.\")\n\n for i in range(2):\n ax = ax1 if i == 0 else ax2\n col = columns[i]\n df_col = df_cols[col]\n df_count = df_col.str.upper().value_counts()\n title = f'{df_col.name.upper()}'\n ax.set_title(label=title, ha='center', va='bottom',\n fontdict={'fontsize': 15}, color='r')\n patches, text, autotext = ax.pie(df_count, labels=df_count.index, autopct='%1.1f%%',\n startangle=90)\n # draw circle\n centre_circle = plt.Circle((0, 0), 0.70, fc='white')\n # Equal aspect ratio ensures that pie is drawn as a circle\n ax.axis('equal')\n ax.add_artist(centre_circle)\n ax.legend(loc='upper left')\n\n plt.tight_layout()\n fig.suptitle(sup_title, ha='center', va='bottom', size=18,\n fontweight='bold', fontdict={'color': 'g'})\n _ = plt.show()\n\n\ndef stacked_donut(grp_data):\n \"\"\"\n Creates a stacked Donut chart by grouping second column inside the first column\n :param grp_data: Pandas Dataframe of two columns\n \"\"\"\n columns = grp_data.columns\n if len(columns) < 2:\n raise Exception(\"Number of columns less than 2.\")\n\n # Create groups and subgroups\n from matplotlib import cm\n group_names = grp_data.index.levels[0].to_list()\n group_size = [grp_data[name].sum() for name in group_names]\n subgroup_names = [f\"{x[1]}\" for x in grp_data.index.values]\n subgroup_size = grp_data.values\n\n # 
Create colors\n    color_palette = [cm.Blues, cm.Reds, cm.Greens,\n                     cm.Oranges, cm.Purples, cm.Greys]\n\n    grp_cmap = {group_names[i]: color_palette[i]\n                for i in range(len(group_names))}\n\n    # Create color palette for group and subgroups\n    grp_colors = [grp_cmap[color](0.8) for color in grp_cmap.keys()]\n    subgroup_colors = []\n    for grp in grp_cmap.keys():\n        variance = np.linspace(.1, .9, grp_data[grp].count())  # Divide based on Sub-group count for a group\n        for i in variance:\n            subgroup_colors.append(grp_cmap[grp](i))\n\n    # First Ring (outside)\n    fig, ax = plt.subplots(figsize=(15, 8))\n    ax.axis('equal')\n    mypie, _ = ax.pie(group_size, radius=1.3, labels=group_names, colors=grp_colors)\n    plt.setp(mypie, width=0.3, edgecolor='white')\n    # Second Ring (Inside)\n    mypie2, _ = ax.pie(subgroup_size, radius=1.3 - 0.3, labels=subgroup_names, labeldistance=0.7,\n                       colors=subgroup_colors)\n    plt.setp(mypie2, width=.9, edgecolor='white')\n    plt.margins(0, 0)\n\n    # show it\n    plt.show()\n\n\ndef strip_violinplot(data, x, y=None, hue=None, title=\"Data Distribution\", xlabel=None, ylabel=None, figsize=(12, 8)):\n    \"\"\"\n    Creates a Violinplot with stripplot showing the distribution density of data as well\n    :param data: Dataframe\n    :param x: X data\n    :param y: y data\n    :param hue: hue column to be used\n    :param title: Title of the plot\n    :param xlabel: X label name\n    :param ylabel: Y label name\n    :param figsize: figsize\n    \"\"\"\n    fig, ax = plt.subplots(figsize=figsize)\n    ax = sns.stripplot(x=x, y=y, data=data,\n                       hue=hue, jitter=0.2, size=2.5)\n    ax = sns.violinplot(x=x, y=y, data=data)\n    if xlabel is None:\n        xlabel = data[x].name\n    if y and ylabel is None:\n        ylabel = data[y].name\n    ax.set(title=title, xlabel=xlabel, ylabel=ylabel)\n    _ = plt.show()\n\n\ndef plot_stripviolens(df, cols, figsize=(20, 20)):\n    \"\"\"Plot the subplots of violinplot and stripplot in 3 columns\"\"\"\n    import math\n    ncols = 3\n    nrows = math.ceil(len(cols) / ncols)\n\n    fig, axs = plt.subplots(nrows, ncols, figsize=figsize)\n    fig.tight_layout()\n\n    purple_color = \"#b753e6\"\n    for i, colname in enumerate(cols):\n        row = int(i / ncols)\n        col = i % ncols\n        sns.stripplot(data=df, x=colname, ax=axs[row][col], color=purple_color, alpha=0.5)\n        sns.violinplot(data=df, x=colname, ax=axs[row][col])\n        axs[row][col].set(xlabel=colname)\n\n    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)\n\n\ndef plot_boxes(df, cols, figsize=(20, 16)):\n    \"\"\"Plot the subplots of boxplots in 3 columns\"\"\"\n    import math\n    ncols = 3\n    nrows = math.ceil(len(cols) / ncols)\n\n    fig, axs = plt.subplots(nrows, ncols, figsize=figsize)\n    fig.tight_layout()\n\n    for i, colname in enumerate(cols):\n        row = int(i / ncols)\n        col = i % ncols\n        sns.boxplot(x=df[colname], ax=axs[row][col]).set(xlabel=colname)\n\n    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)\n\n\ndef qq_plot(df, features):\n    \"\"\"\n    Plot a Quantile to Quantile plot for the features\n    :param df:\n    :param features:\n    \"\"\"\n    import scipy.stats as stats\n    import pylab\n    plt.figure(figsize=(10, 6))\n    plt.subplot(1, 2, 1)\n    df[features].hist()\n    plt.subplot(1, 2, 2)\n    stats.probplot(df[features], dist='norm', plot=pylab)\n    plt.show()\n\n\ndef grouped_bar(df, title, figsize=(20, 8)):\n    \"\"\"\n    Creates a beautiful Bar chart with groups\n    :param df: Dataframe\n    :param title: Title of the chart\n    :param figsize: Figure size\n    \"\"\"\n    colors_list = ['#5cb85c', '#5bc0de', '#d9534f']\n    # Create graph with figure size 20,8 | width 0.8\n    ax 
= df.plot.bar(figsize=figsize, width=0.8, color=colors_list)\n    # Set Title with font size 16\n    plt.title(title, size=16)\n    # Shift Legend to upper right with font 14\n    plt.legend(loc='upper right', fontsize=14)\n    plt.xticks(fontsize=14, rotation=0)\n    # Hide Y axis values\n    ax.yaxis.set_major_locator(plt.NullLocator())\n\n    # Remove the borders\n    ax.spines['left'].set_color(None)\n    ax.spines['right'].set_color(None)\n    ax.spines['top'].set_color(None)\n\n    # Show the percentage value above each bar\n    # Loop through the Rectangle objects of the plot to get the bars\n    for bar in ax.patches:\n        ax.text(bar.get_x() + bar.get_width() / 2 + .05,  # X position of text, start position + half width + margin\n                bar.get_height() + 1,  # Y position of text\n                f\"{bar.get_height()}%\",  # Text Value\n                fontsize=14,  # Fontsize of text\n                ha=\"center\"  # Alignment of text\n                )\n\n    # Show plot\n    plt.show()\n\n\ndef value_count_bar(data, title=None, normalize=True, figsize=(12, 7),\n                    titlesize=17, labelsize=14, ticksize=12):\n    \"\"\"\n    Plots a beautiful bar plot for the count of columns\n    :param data: Pandas Series\n    :param title: Title of the plot\n    :param normalize: If true distribution will be shown in normalized fashion\n    :param labelsize: Label size\n    :param figsize: Figure size\n    :param titlesize: Title size\n    \"\"\"\n    grp_data = data.value_counts(normalize=normalize).reset_index()\n    column_name = grp_data.columns[1]\n    grp_data.columns = [column_name, 'count']\n    _, ax = plt.subplots(figsize=figsize)\n    ax = sns.barplot(x=column_name, y=\"count\", data=grp_data, palette=sns.color_palette(\"tab10\"))\n    if title is None:\n        title = f'Distribution of {column_name.capitalize()}'\n    ax.set_title(title, fontsize=titlesize)\n    ax.set_xlabel(column_name.capitalize(), fontsize=labelsize)\n    ax.set_ylabel(f'{column_name.capitalize()} count', fontsize=labelsize)\n    # ax.set(title=title, xlabel=column_name.capitalize(), ylabel=f'{column_name.capitalize()} count')\n\n    plt.xticks(fontsize=ticksize, rotation=10)\n    ax.yaxis.set_major_locator(plt.NullLocator())\n    ax.spines['left'].set_color(None)\n    ax.spines['right'].set_color(None)\n    ax.spines['top'].set_color(None)\n\n    # Print values above the bar\n    for bar in ax.patches:\n        x_pos = bar.get_x() + bar.get_width() / 2\n        y_pos = bar.get_height() + bar.get_height() * .01\n        value = f\"{bar.get_height() * 100:.2f} %\" if normalize else round(bar.get_height())\n        ax.text(x_pos,  # X Position of the text\n                y_pos,  # Y Position of the text\n                value,  # Value to print above bar\n                fontsize=labelsize,  # Fontsize\n                ha=\"center\"  # Alignment of text\n                )\n    plt.show()\n\n\ndef plot_images(images_path_list, row=1, col=1, figsize=(8, 8)):\n    num_plots = row * col\n    assert images_path_list is not None and len(images_path_list) >= num_plots, 'Number of items in the list are lesser than subplots count.'\n    fig, axs = plt.subplots(row, col, figsize=figsize)\n    for i in range(row):\n        for j in range(col):\n            # index row-major into the flat list; i + j would repeat images\n            image_path = images_path_list[i * col + j]\n            axs[i, j].imshow(plt.imread(image_path))\n    plt.show()\n\n\ndef plot_confusion_matrix(cnf_matrix_data, target_names,\n                          title='Confusion matrix'):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    Normalization can be applied by setting `normalize=True`.\n    \"\"\"\n    # from sklearn.metrics import confusion_matrix\n    df_cm = pd.DataFrame(cnf_matrix_data, columns=target_names, index=target_names).astype('float')\n\n    df_cm.index.name = 'Actual'\n    df_cm.columns.name = 'Predicted'\n    plt.figure(figsize=(10, 5))\n    plt.title(title, color='green', 
fontsize=25)\n    tick_marks = np.arange(len(target_names))\n    plt.xticks(tick_marks, target_names, rotation=45, color='indigo')\n    plt.yticks(tick_marks, target_names, color='indigo')\n    sns.set(font_scale=1.4)  # for label size\n    sns.heatmap(df_cm, cmap=\"YlGnBu\", annot=True, annot_kws={\"size\": 16}, fmt=\".0f\")\n    # plt.imshow(df_cm, interpolation='nearest', cmap=\"YlGnBu\")\n    plt.tight_layout()\n    plt.ylabel('Actual', color='crimson', fontsize=20)\n    plt.xlabel('Predicted', color='crimson', fontsize=20)\n\n\ndef plot_loss_acc(history, title_remarks=None, figsize=(8, 6)):\n    \"\"\"\n    Method to plot the Loss and Accuracy with the given History dataframe as input\n    \"\"\"\n    history_df = pd.DataFrame(history.history)\n    # plot loss during training\n    fig, ax = plt.subplots(ncols=2, figsize=figsize)\n    title = f'Loss and Accuracy Graph\\n{\"\" if title_remarks is None else title_remarks}'\n    fig.suptitle(title, size=15)\n\n    ax[0].set_title('Loss')\n    ax[0].plot(history_df['loss'], label='train')\n    if 'val_loss' in history_df.columns:\n        ax[0].plot(history_df['val_loss'], label='test')\n    ax[0].legend()\n    # plot accuracy during training\n    ax[1].set_title('Accuracy')\n    ax[1].plot(history_df['accuracy'], label='train')\n    if 'val_accuracy' in history_df.columns:\n        ax[1].plot(history_df['val_accuracy'], label='test')\n    ax[1].legend()\n    plt.show()\n"
] |
[
[
"pandas.concat",
"tensorflow.python.keras.layers.Flatten",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.python.keras.models.Sequential",
"sklearn.model_selection.KFold",
"pandas.DataFrame",
"tensorflow.keras.optimizers.Adam",
"tensorflow.python.keras.layers.MaxPool2D"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.rc",
"pandas.DataFrame",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots_adjust",
"pandas.set_option",
"matplotlib.pyplot.NullLocator",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.margins",
"scipy.stats.probplot",
"matplotlib.rcParams.update",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks"
]
] |
iamh2o/oxfs
|
[
"991e2f9dd2fc5e0e693b57298027a0edffc87fbb"
] |
[
"files/iozone_xls_to_graph.py"
] |
[
"#!/usr/bin/env python\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nclass XlxToGraph(object):\n def __init__(self):\n self.oxfs_iozone_xls = './oxfs_iozone.xls'\n self.sshfs_iozone_xls = './sshfs_iozone.xls'\n self.oxfs_xls = pd.read_excel(self.oxfs_iozone_xls)\n self.sshfs_xls = pd.read_excel(self.sshfs_iozone_xls)\n matplotlib.rcParams['font.sans-serif'] = ['monofur']\n\n def float2(self, x):\n return int(x * 100) / 100\n\n def draw_one(self, xls, title, color):\n title = '{} ({})'.format(title, xls.columns[0])\n column_a = xls[xls.columns[0]]\n column_c = xls[xls.columns[2]]\n\n ticks = [column_a[x] for x in range(3, 16)]\n kbps = [self.float2(column_c[x]) for x in range(3, 16)]\n plt.barh(range(16 - 3), kbps, height=0.2, color=color, alpha=0.8)\n plt.yticks(range(16 - 3), ticks)\n plt.xlim(0, max(kbps) * 1.2)\n plt.xlabel(\"Speed\")\n plt.title(title)\n for x, y in enumerate(kbps):\n plt.text(y + 1000, x - 0.1, '%s KB/s' % y)\n\n plt.show()\n\n def draw_compare(self):\n xls = self.oxfs_xls\n column_a = xls[xls.columns[0]]\n column_c = xls[xls.columns[2]]\n\n oxfs_ticks = [column_a[x] + '- oxfs' for x in range(3, 16)]\n oxfs_kbps = [self.float2(column_c[x]) for x in range(3, 16)]\n\n xls = self.sshfs_xls\n column_a = xls[xls.columns[0]]\n column_c = xls[xls.columns[2]]\n\n sshfs_ticks = [column_a[x] + '- sshfs' for x in range(3, 16)]\n sshfs_kbps = [self.float2(column_c[x]) for x in range(3, 16)]\n\n ticks = []\n kbps = []\n for i in range(0, len(oxfs_kbps)):\n ticks.append(oxfs_ticks[i])\n ticks.append(sshfs_ticks[i])\n kbps.append(oxfs_kbps[i])\n kbps.append(sshfs_kbps[i])\n\n barlist = plt.barh(range(len(kbps)), kbps, height=0.3, color='coral', alpha=0.8)\n for bar in barlist[1::2]:\n bar.set_color('slateblue')\n plt.yticks(range(len(ticks)), ticks)\n plt.xlim(0, max(kbps) * 1.2)\n for x, y in enumerate(kbps):\n plt.text(y + 1000, x - 0.1, '%s KB/s' % y)\n\n title = 'Oxfs Vs Sshfs ({})'.format(xls.columns[0])\n plt.title(title)\n plt.xlabel(\"Speed\")\n\n plt.show()\n\nxls2graph = XlxToGraph()\nxls2graph.draw_one(xls2graph.oxfs_xls, 'Oxfs', 'coral')\n\nxls2graph = XlxToGraph()\nxls2graph.draw_one(xls2graph.sshfs_xls, 'Sshfs', 'slateblue')\n\nxls2graph = XlxToGraph()\nxls2graph.draw_compare()\n"
] |
[
[
"pandas.read_excel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.show"
]
] |
alexchungio/Slimming-Pytorch
|
[
"98ee08fffa7642d578cc4994b0eb8f534ebc5fb1"
] |
[
"models/channel_selection.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass channel_selection(nn.Module):\n \"\"\"\n Select channels from the output of BatchNorm2d layer. It should be put directly after BatchNorm2d layer.\n The output shape of this layer is determined by the number of 1 in `self.indexes`.\n \"\"\"\n def __init__(self, num_channels):\n \"\"\"\n Initialize the `indexes` with all one vector with the length same as the number of channels.\n During pruning, the places in `indexes` which correspond to the channels to be pruned will be set to 0.\n \"\"\"\n super(channel_selection, self).__init__()\n self.indexes = nn.Parameter(torch.ones(num_channels))\n\n def forward(self, input_tensor):\n \"\"\"\n Parameter\n ---------\n input_tensor: (N,C,H,W). It should be the output of BatchNorm2d layer.\n \"\"\"\n selected_index = np.squeeze(np.argwhere(self.indexes.data.cpu().numpy()))\n if selected_index.size == 1:\n selected_index = np.resize(selected_index, (1,))\n output = input_tensor[:, selected_index, :, :]\n return output"
] |
[
[
"numpy.resize",
"torch.ones"
]
] |
eudoxos/imfractal
|
[
"a33c5ae5771ba9d4437cb665c0c48dc492e74025"
] |
[
"tests/test_bonesShow.py"
] |
[
"\"\"\"\nCopyright (c) 2013 Rodrigo Baravalle\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n1. Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n3. The name of the author may not be used to endorse or promote products\nderived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\nIMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\nOF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\nIN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\nNOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\nTHIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nfrom imfractal import *\n\nfrom PIL import Image\nimport time\nimport matplotlib.pyplot as plt\nfrom pylab import *\n\nimport os\nimport sys\nsys.path.append('/home/rodrigo/imfractal/imfractal/Algorithm/')\n\nimport qs3D\n\ndef createFolders(dirs):\n\n for f in dirs:\n if not os.path.isdir(f): \n os.mkdir (f) \n\n\ndef do_test():\n\n\n\n arr = [ \"imfractal/Algorithm/qs3D\", \"imfractal/Algorithm/qs\"]\n\n for i in range(len(arr)):\n\n command1 = \"cython \"+arr[i]+\".pyx \"\n command2 = \"gcc -c -fPIC -I/usr/include/python2.7/ \"+arr[i]+\".c\"+\" -o \"+arr[i]+\".o\"\n command3 = \"gcc -shared \"+arr[i]+\".o -o \"+arr[i]+\".so\"\n\n print(command1)\n os.system(command1)\n print(command2)\n os.system(command2)\n print(command3)\n os.system(command3)\n\n\n print(\"Creating folder..\")\n createFolders(['exps/figs'])\n\n # load array object file\n res = np.load(\"mfss.npy\")\n\n patients = [\"5c\", \"6b\", \"8b\", \"8c\", \"V12\"]\n\n # scans except xct\n scans = [\"M1\", \"M2\", \"01\", \"02\", \"03\"]\n \n\n pp = 0\n for p in patients:\n ss = 0\n for s in scans:\n for i in range(1,26,2):\n clf()\n plt.figure(pp*len(patients)+ss*len(scans)+i+1)\n plt.subplot(221)\n plt.ylim(ymax = 3.6, ymin = 2.0)\n #plt.title(\"XCT 5c_XtremeCTSlices\")\n plt.title(p + \"-\" + s + \" - HRCT - VOI \"+str(i))\n plt.plot(res[pp][ss][i])\n\n plt.subplot(222)\n plt.ylim(ymax = 3.6, ymin = 2.0)\n plt.title(p + \" - XCT - VOI \"+str(i))\n plt.plot(res[pp][5][i])\n\n plt.subplot(223)\n plt.ylim(ymax = 3.6, ymin = 2.0)\n \n plt.title(p + \"-\" + s + \" - HRCT - VOI \"+ str(i+1))\n plt.plot(res[pp][ss][i+1])\n\n plt.subplot(224)\n plt.ylim(ymax = 3.6, ymin = 2.0)\n plt.title(p + \" - XCT - VOI \"+ str(i+1))\n plt.plot(res[pp][5][i+1])\n\n \n savefig(\"exps/figs/\"+p+s+\"VOI\"+str(i)+\"-\"+str(i+1)+\".png\")\n\n ss = ss+1\n pp = pp+1\n\n \n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplot"
]
] |
pranjukn/AI-Feynman
|
[
"92e67b01fc2b00ed6ebcacc67edf6122b4219ac7"
] |
[
"aifeynman/S_gen_sym.py"
] |
[
"import numpy as np\nfrom .RPN_to_eq import RPN_to_eq\nfrom scipy.optimize import fsolve\nfrom sympy import lambdify, N\nimport torch\nimport copy\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .get_pareto import Point, ParetoSet\nfrom .S_get_expr_complexity import get_expr_complexity\nfrom . import test_points\nimport os\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nis_cuda = torch.cuda.is_available()\n\n\n# fix this to work with the other variables constant\ndef check_gen_sym(pathdir,filename,model,gen_sym_idx,express,mu,sigma,nu=10):\n gen_sym_idx = np.append(gen_sym_idx,-1)\n data_all = np.loadtxt(pathdir+filename)\n # Choose only the data to be separated\n data = np.loadtxt(pathdir+filename)[:,gen_sym_idx]\n # Turn the equation from RPN to normal mathematical expression\n eq = RPN_to_eq(express)\n \n # Get the variables appearing in the equation\n possible_vars = [\"x%s\" %i for i in np.arange(0,30,1)]\n variables = []\n N_vars = len(data[0])-1\n for i in range(N_vars):\n variables = variables + [possible_vars[i]]\n symbols = variables\n f = lambdify(symbols, N(eq))\n\n fixed = data[:,0:-1]\n length_fixed = len(fixed)\n\n bm = np.ones(len(data[0])-1,dtype=bool)\n obj = test_points.init_general_test_point(eq, data[:,:-1], data[:,-1], bm)\n\n list_z = np.array([])\n z = 0\n i = 0\n while z<nu and i<len(data[0:1000]):\n # Generate functions based on the discovered possible equation and check if they are right\n dt = test_points.get_test_point(obj,data[i][:-1])\n diff = abs(f(*fixed[i])-f(*dt))\n with torch.no_grad():\n if diff<1e-4:\n if is_cuda:\n dt_ = data_all[i]\n ii = 0\n for k in gen_sym_idx[:-1]:\n dt_[k]=dt[ii]\n ii = ii + 1\n dt = torch.tensor(dt_).float().cuda().view(1,len(dt_))\n dt = torch.cat((torch.tensor([np.zeros(len(dt[0]))]).float().cuda(),dt), 0)\n error = torch.tensor(data[:,-1][i]).cuda()-model(dt[:,:-1])[1:]\n error = error.cpu().detach().numpy()\n list_z = np.append(list_z,np.log2(1+abs(error)*2**30))\n z = np.sqrt(len(list_z))*(np.mean(list_z)-mu)/sigma\n else:\n dt_ = data_all[i]\n ii = 0\n for k in gen_sym_idx[:-1]:\n dt_[k]=dt[ii]\n ii = ii + 1\n dt = torch.tensor(dt_).float().view(1,len(dt_))\n dt = torch.cat((torch.tensor([np.zeros(len(dt[0]))]).float(),dt), 0)\n error =torch.tensor(data[:,-1][i])-model(dt[:,:-1])[1:]\n error = error.detach().numpy()\n list_z = np.append(list_z,np.log2(1+abs(error)*2**30))\n z = np.sqrt(len(list_z))*(np.mean(list_z)-mu)/sigma\n \n i = i + 1\n else:\n i = i + 1\n\n \n if i==len(data[0:1000]) and np.mean(list_z)<mu:\n return (1,express,np.mean(list_z),np.std(list_z))\n else:\n return (0,express,100,100)\n\n\ndef do_gen_sym(pathdir, filename, gen_sym_idx,express):\n gen_sym_idx = np.append(gen_sym_idx,-1)\n data_all = np.loadtxt(pathdir+filename)\n\n # Choose only the data to be separated\n data = np.loadtxt(pathdir+filename)[:,gen_sym_idx]\n # Turn the equation from RPN to normal mathematical expression\n eq = RPN_to_eq(express)\n # Get the variables appearing in the equation\n possible_vars = [\"x%s\" %i for i in np.arange(0,30,1)]\n variables = []\n\n N_vars = len(data[0])-1\n for i in range(N_vars):\n variables = variables + [possible_vars[i]]\n\n symbols = variables\n f = lambdify(symbols, N(eq))\n\n ii = 0\n for k in gen_sym_idx[1:-1]:\n data_all = np.delete(data_all,k-ii,1)\n ii = ii + 1\n\n new_data = f(*np.transpose(data[:,0:-1]))\n data_all[:,gen_sym_idx[0]]=new_data\n #save_data = np.column_stack((new_data,data_all))\n save_data = data_all\n\n try:\n os.mkdir(\"results/gen_sym\")\n except:\n 
pass\n\n file_name = filename + \"-gen_sym\"\n np.savetxt(\"results/gen_sym/\"+file_name,save_data)\n\n return (\"results/gen_sym/\", file_name)\n\ndef add_gen_sym_on_pareto(PA1,PA, gen_sym_idx, express):\n # Turn the equation from RPN to normal mathematical expression\n possible_vars = [\"x%s\" %i for i in np.arange(0,100,1)]\n gen_sym_idx = np.array(gen_sym_idx)\n math_eq = RPN_to_eq(express)\n\n PA1 = np.array(PA1.get_pareto_points()).astype('str')\n for i in range(len(PA1)):\n exp1 = PA1[i][2]\n temp_list = copy.deepcopy(gen_sym_idx)\n bf_eq = math_eq\n \n while(len(temp_list)>1):\n for j in range(len(possible_vars)-len(temp_list),temp_list[-1]-len(temp_list)+1,-1):\n exp1 = exp1.replace(possible_vars[j],possible_vars[j+1])\n temp_list = np.delete(temp_list,-1)\n \n # replace variables in bf_eq\n arr_idx = np.flip(np.arange(0,len(gen_sym_idx),1), axis=0)\n actual_idx = np.flip(gen_sym_idx, axis=0)\n for k in range(len(gen_sym_idx)):\n bf_eq = bf_eq.replace(possible_vars[arr_idx[k]],possible_vars[actual_idx[k]])\n\n exp1 = exp1.replace(possible_vars[temp_list[0]],\"(\" + bf_eq + \")\")\n compl = get_expr_complexity(exp1)\n PA.add(Point(x=compl,y=float(PA1[i][1]),data=str(exp1)))\n\n return PA\n"
] |
[
[
"numpy.arange",
"numpy.transpose",
"torch.tensor",
"numpy.append",
"numpy.delete",
"torch.no_grad",
"numpy.mean",
"torch.cuda.is_available",
"numpy.std",
"numpy.savetxt",
"numpy.array",
"numpy.flip",
"numpy.loadtxt"
]
] |
HuJiayin/spark
|
[
"f6fba2b196346103256fa32a797438fbd5cc001c"
] |
[
"python/pyspark/ml/tuning.py"
] |
[
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport itertools\nimport numpy as np\n\nfrom pyspark import since\nfrom pyspark.ml import Estimator, Model\nfrom pyspark.ml.param import Params, Param\nfrom pyspark.ml.param.shared import HasSeed\nfrom pyspark.ml.util import keyword_only\nfrom pyspark.sql.functions import rand\n\n__all__ = ['ParamGridBuilder', 'CrossValidator', 'CrossValidatorModel']\n\n\nclass ParamGridBuilder(object):\n r\"\"\"\n Builder for a param grid used in grid search-based model selection.\n\n >>> from pyspark.ml.classification import LogisticRegression\n >>> lr = LogisticRegression()\n >>> output = ParamGridBuilder() \\\n ... .baseOn({lr.labelCol: 'l'}) \\\n ... .baseOn([lr.predictionCol, 'p']) \\\n ... .addGrid(lr.regParam, [1.0, 2.0]) \\\n ... .addGrid(lr.maxIter, [1, 5]) \\\n ... .build()\n >>> expected = [\n ... {lr.regParam: 1.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},\n ... {lr.regParam: 2.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},\n ... {lr.regParam: 1.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'},\n ... {lr.regParam: 2.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]\n >>> len(output) == len(expected)\n True\n >>> all([m in expected for m in output])\n True\n\n .. versionadded:: 1.4.0\n \"\"\"\n\n def __init__(self):\n self._param_grid = {}\n\n @since(\"1.4.0\")\n def addGrid(self, param, values):\n \"\"\"\n Sets the given parameters in this grid to fixed values.\n \"\"\"\n self._param_grid[param] = values\n\n return self\n\n @since(\"1.4.0\")\n def baseOn(self, *args):\n \"\"\"\n Sets the given parameters in this grid to fixed values.\n Accepts either a parameter dictionary or a list of (parameter, value) pairs.\n \"\"\"\n if isinstance(args[0], dict):\n self.baseOn(*args[0].items())\n else:\n for (param, value) in args:\n self.addGrid(param, [value])\n\n return self\n\n @since(\"1.4.0\")\n def build(self):\n \"\"\"\n Builds and returns all combinations of parameters specified\n by the param grid.\n \"\"\"\n keys = self._param_grid.keys()\n grid_values = self._param_grid.values()\n return [dict(zip(keys, prod)) for prod in itertools.product(*grid_values)]\n\n\nclass CrossValidator(Estimator, HasSeed):\n \"\"\"\n K-fold cross validation.\n\n >>> from pyspark.ml.classification import LogisticRegression\n >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator\n >>> from pyspark.mllib.linalg import Vectors\n >>> dataset = sqlContext.createDataFrame(\n ... [(Vectors.dense([0.0]), 0.0),\n ... (Vectors.dense([0.4]), 1.0),\n ... (Vectors.dense([0.5]), 0.0),\n ... (Vectors.dense([0.6]), 1.0),\n ... (Vectors.dense([1.0]), 1.0)] * 10,\n ... 
[\"features\", \"label\"])\n >>> lr = LogisticRegression()\n >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()\n >>> evaluator = BinaryClassificationEvaluator()\n >>> cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)\n >>> cvModel = cv.fit(dataset)\n >>> evaluator.evaluate(cvModel.transform(dataset))\n 0.8333...\n\n .. versionadded:: 1.4.0\n \"\"\"\n\n # a placeholder to make it appear in the generated doc\n estimator = Param(Params._dummy(), \"estimator\", \"estimator to be cross-validated\")\n\n # a placeholder to make it appear in the generated doc\n estimatorParamMaps = Param(Params._dummy(), \"estimatorParamMaps\", \"estimator param maps\")\n\n # a placeholder to make it appear in the generated doc\n evaluator = Param(\n Params._dummy(), \"evaluator\",\n \"evaluator used to select hyper-parameters that maximize the cross-validated metric\")\n\n # a placeholder to make it appear in the generated doc\n numFolds = Param(Params._dummy(), \"numFolds\", \"number of folds for cross validation\")\n\n @keyword_only\n def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\n seed=None):\n \"\"\"\n __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\\\n seed=None)\n \"\"\"\n super(CrossValidator, self).__init__()\n #: param for estimator to be cross-validated\n self.estimator = Param(self, \"estimator\", \"estimator to be cross-validated\")\n #: param for estimator param maps\n self.estimatorParamMaps = Param(self, \"estimatorParamMaps\", \"estimator param maps\")\n #: param for the evaluator used to select hyper-parameters that\n #: maximize the cross-validated metric\n self.evaluator = Param(\n self, \"evaluator\",\n \"evaluator used to select hyper-parameters that maximize the cross-validated metric\")\n #: param for number of folds for cross validation\n self.numFolds = Param(self, \"numFolds\", \"number of folds for cross validation\")\n self._setDefault(numFolds=3)\n kwargs = self.__init__._input_kwargs\n self._set(**kwargs)\n\n @keyword_only\n @since(\"1.4.0\")\n def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\n seed=None):\n \"\"\"\n setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\\\n seed=None):\n Sets params for cross validator.\n \"\"\"\n kwargs = self.setParams._input_kwargs\n return self._set(**kwargs)\n\n @since(\"1.4.0\")\n def setEstimator(self, value):\n \"\"\"\n Sets the value of :py:attr:`estimator`.\n \"\"\"\n self._paramMap[self.estimator] = value\n return self\n\n @since(\"1.4.0\")\n def getEstimator(self):\n \"\"\"\n Gets the value of estimator or its default value.\n \"\"\"\n return self.getOrDefault(self.estimator)\n\n @since(\"1.4.0\")\n def setEstimatorParamMaps(self, value):\n \"\"\"\n Sets the value of :py:attr:`estimatorParamMaps`.\n \"\"\"\n self._paramMap[self.estimatorParamMaps] = value\n return self\n\n @since(\"1.4.0\")\n def getEstimatorParamMaps(self):\n \"\"\"\n Gets the value of estimatorParamMaps or its default value.\n \"\"\"\n return self.getOrDefault(self.estimatorParamMaps)\n\n @since(\"1.4.0\")\n def setEvaluator(self, value):\n \"\"\"\n Sets the value of :py:attr:`evaluator`.\n \"\"\"\n self._paramMap[self.evaluator] = value\n return self\n\n @since(\"1.4.0\")\n def getEvaluator(self):\n \"\"\"\n Gets the value of evaluator or its default value.\n \"\"\"\n return self.getOrDefault(self.evaluator)\n\n @since(\"1.4.0\")\n def setNumFolds(self, value):\n \"\"\"\n Sets 
the value of :py:attr:`numFolds`.\n \"\"\"\n self._paramMap[self.numFolds] = value\n return self\n\n @since(\"1.4.0\")\n def getNumFolds(self):\n \"\"\"\n Gets the value of numFolds or its default value.\n \"\"\"\n return self.getOrDefault(self.numFolds)\n\n def _fit(self, dataset):\n est = self.getOrDefault(self.estimator)\n epm = self.getOrDefault(self.estimatorParamMaps)\n numModels = len(epm)\n eva = self.getOrDefault(self.evaluator)\n nFolds = self.getOrDefault(self.numFolds)\n seed = self.getOrDefault(self.seed)\n h = 1.0 / nFolds\n randCol = self.uid + \"_rand\"\n df = dataset.select(\"*\", rand(seed).alias(randCol))\n metrics = np.zeros(numModels)\n for i in range(nFolds):\n validateLB = i * h\n validateUB = (i + 1) * h\n condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)\n validation = df.filter(condition)\n train = df.filter(~condition)\n for j in range(numModels):\n model = est.fit(train, epm[j])\n # TODO: duplicate evaluator to take extra params from input\n metric = eva.evaluate(model.transform(validation, epm[j]))\n metrics[j] += metric\n\n if eva.isLargerBetter():\n bestIndex = np.argmax(metrics)\n else:\n bestIndex = np.argmin(metrics)\n bestModel = est.fit(dataset, epm[bestIndex])\n return CrossValidatorModel(bestModel)\n\n @since(\"1.4.0\")\n def copy(self, extra=None):\n \"\"\"\n Creates a copy of this instance with a randomly generated uid\n and some extra params. This copies creates a deep copy of\n the embedded paramMap, and copies the embedded and extra parameters over.\n\n :param extra: Extra parameters to copy to the new instance\n :return: Copy of this instance\n \"\"\"\n if extra is None:\n extra = dict()\n newCV = Params.copy(self, extra)\n if self.isSet(self.estimator):\n newCV.setEstimator(self.getEstimator().copy(extra))\n # estimatorParamMaps remain the same\n if self.isSet(self.evaluator):\n newCV.setEvaluator(self.getEvaluator().copy(extra))\n return newCV\n\n\nclass CrossValidatorModel(Model):\n \"\"\"\n Model from k-fold cross validation.\n\n .. versionadded:: 1.4.0\n \"\"\"\n\n def __init__(self, bestModel):\n super(CrossValidatorModel, self).__init__()\n #: best model from cross validation\n self.bestModel = bestModel\n\n def _transform(self, dataset):\n return self.bestModel.transform(dataset)\n\n @since(\"1.4.0\")\n def copy(self, extra=None):\n \"\"\"\n Creates a copy of this instance with a randomly generated uid\n and some extra params. This copies the underlying bestModel,\n creates a deep copy of the embedded paramMap, and\n copies the embedded and extra parameters over.\n\n :param extra: Extra parameters to copy to the new instance\n :return: Copy of this instance\n \"\"\"\n if extra is None:\n extra = dict()\n return CrossValidatorModel(self.bestModel.copy(extra))\n\n\nif __name__ == \"__main__\":\n import doctest\n from pyspark.context import SparkContext\n from pyspark.sql import SQLContext\n globs = globals().copy()\n # The small batch size here ensures that we see multiple batches,\n # even in these small test examples:\n sc = SparkContext(\"local[2]\", \"ml.tuning tests\")\n sqlContext = SQLContext(sc)\n globs['sc'] = sc\n globs['sqlContext'] = sqlContext\n (failure_count, test_count) = doctest.testmod(\n globs=globs, optionflags=doctest.ELLIPSIS)\n sc.stop()\n if failure_count:\n exit(-1)\n"
] |
[
[
"numpy.argmax",
"numpy.zeros",
"numpy.argmin"
]
] |
ProjectPepperHSB/Backend-Services
|
[
"06f0a6e58dbc06fc29e8144a149be4978363f036"
] |
[
"analysis/weekly-report.py"
] |
[
"# ----- I M P O R T S ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----\nimport sys, getpass, traceback\nimport warnings \n\nimport pandas as pd\nimport numpy as np\n# from sklearn.linear_model import LinearRegression\n\nfrom datetime import datetime, timedelta\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfrom dotenv import dotenv_values\nfrom Client import Client # can be found in same dir as this file in repositorie\n\n# ----- M E T A D A T A ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----\n\n__version__ = 'v1.0.0'\n__description__ = 'Script to fetch Pepper data and create .pdf containing weekly report'\n__author__ = 'Benjamin Thomas Schwertfeger'\n__copyright__ = 'Benjamin Thomas Schwertfeger'\n__email__ = '[email protected]'\n__status__ = 'Production'\n__github__ = 'https://github.com/ProjectPepperHSB/Backend-Services.git'\n\n# ----- S E T T I N G S ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----\n\nif sys.platform == 'linux' or sys.platform == 'linux2':\n out_dir = f'/home/docker-hbv-kms/weekly-reports'\nelif sys.platform == 'darwin':\n out_dir = f'/Users/{getpass.getuser()}/repositories/Backend-Services/analysis/out'\nelif sys.platform == 'win32':\n exit() # enter your windows path path\n\nwarnings.filterwarnings('ignore')\n\nplt.rcParams['figure.figsize'] = [10, 6]\nplt.rcParams['savefig.bbox'] = 'tight'\nplt.style.use('fivethirtyeight')\n\n_red, _orange, _gray, _green, _blue = '#fc4f30', '#e5ae38', '#8b8b8b', '#6d904f', '#30a2da'\n_weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n \n# ----- ----- -----\nLOOKBACK = 7 #days\n\nnow = datetime.now()\nstart_date = (now - timedelta(days=LOOKBACK)).strftime('%Y-%m-%d')\nout_fname = f'weekly_report_{start_date}_-_{now.strftime(\"%Y-%m-%d\")}.pdf'\n\n# ----- S E T U P ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----\n\nnot_understand_df, emotion_states_df, use_case_df = None, None, None\n\n# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----\n\ndef init() -> None:\n global not_understand_df, emotion_states_df, use_case_df\n\n try:\n config = dotenv_values('.env') \n API_KEY = config['API_KEY']\n except KeyError:\n print('No .env file with API_KEY found!')\n exit()\n\n try:\n client = Client(API_KEY, sandbox=False, verbose=1) # create client to connect with web-app \n\n query_str = f'WHERE ts > NOW() - INTERVAL {LOOKBACK} day'\n \n not_understand_df = pd.DataFrame(data=client.sql_query(f'SELECT * FROM pepper_did_not_understand_table {query_str}'))\n emotion_states_df = pd.DataFrame(data=client.sql_query(f'SELECT * FROM pepper_emotion_table {query_str}'))\n use_case_df = pd.DataFrame(data=client.sql_query(f'SELECT * FROM pepper_use_case_table {query_str}'))\n\n # preprocessing\n emotion_states_df['dialog_time'] = np.array([x for x in emotion_states_df['dialog_time']]).astype('float32')\n except:\n print(f'Could not fetch data from backend!\\n{traceback.format_exc()}')\n print('Check your internet connection and check if the backend service is running!')\n exit()\n\ndef plots(pdf) -> None:\n # PIE ----- USE-CASE-USAGE -----\n plt.figure()\n fig = use_case_df['use_case'].value_counts().plot(kind='pie', autopct='%1.1f%%').get_figure()\n plt.title('Use-Case usage'); plt.axis('off'); pdf.savefig(fig)\n\n # PIE ----- GENDER-DISTRIBUTION -----\n plt.figure()\n fig = emotion_states_df.gender.value_counts().plot(kind='pie', 
autopct='%1.1f%%',colors = [ _red, _blue ]).get_figure()\n plt.title('Gender distribution'); plt.axis('off'); pdf.savefig(fig) \n\n # PIE ----- BASIC-EMOTION -----\n plt.figure()\n fig = emotion_states_df['basic_emotion'].value_counts().plot(kind='pie', autopct='%1.1f%%',colors = [ _gray, _red, _orange, _green]).get_figure()\n plt.title('Distribution of basic emotion occurrence'); plt.axis('off'); pdf.savefig()\n\n # BARH ----- DISTRIBUTION OF EMOTION BY GENDER -----\n plt.figure()\n fig = pd.concat(\n [emotion_states_df[['gender', 'basic_emotion']].pivot_table(index=['gender'], columns=col, aggfunc=len) for col in ['basic_emotion']], axis = 1\n ).fillna(0).plot(kind = 'barh', color = { 'bad': _red, 'bored': _gray, 'excited': _orange, 'good': _green }).get_figure()\n plt.title('Distributions of basic emotions grouped by gender'); plt.xlabel('count'); pdf.savefig()\n\n # BARH ----- DISTRIBUTION OF PLEASURE STATES ------\n plt.figure()\n fig = pd.concat(\n [emotion_states_df[['gender', 'pleasure_state']].pivot_table(index=['gender'], columns=col, aggfunc=len) for col in ['pleasure_state']], axis=1\n ).fillna(0).plot(kind='barh', color={ 'bad': _red, 'medium': _gray, 'good': _orange, 'perfect': _green })\n plt.title('Distribution of pleasure states grouped by gender'); pdf.savefig()\n\n # ----- STATISTICS BY DAY OF WEEK ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ----- ----- ----- ----- \n d = emotion_states_df[['distance', 'gender', 'age', 'basic_emotion', 'pleasure_state', 'excitement_state', 'smile_state', 'dialog_time', 'ts']]\n d_use_case = use_case_df[['use_case', 'ts']]\n\n for i, ts in enumerate(d['ts']):\n date_obj = datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.000Z')\n d['ts'][i] = f'{date_obj.day}.{date_obj.month}.{date_obj.year} {date_obj.hour}:{date_obj.minute}'\n for i, ts in enumerate(d_use_case['ts']):\n date_obj = datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.000Z')\n d_use_case['ts'][i] = f'{date_obj.day}.{date_obj.month}.{date_obj.year} {date_obj.hour}:{date_obj.minute}'\n \n d['weekday'] = [datetime.strptime(ts, '%d.%m.%Y %H:%M').weekday() for ts in d['ts']]\n d_use_case['weekday'] = [datetime.strptime(ts, '%d.%m.%Y %H:%M').weekday() for ts in d_use_case['ts']]\n \n # BAR ----- EMOTIONS BY WEEKDAY\n plt.figure()\n fig = pd.concat(\n [d[['weekday', 'basic_emotion']].pivot_table(index=['weekday'], columns=col, aggfunc=len) for col in ['basic_emotion']], axis = 1\n ).fillna(0).plot(kind = 'bar', color = { 'bad': _red, 'bored': _gray, 'excited': _orange, 'good': _green }, rot = 35)\n\n plt.title('Distribution of basic emotions grouped by weekday')\n plt.xticks(np.arange(len(plt.gca().get_xticklabels())), [_weekdays[int(i.get_text())] for i in plt.gca().get_xticklabels()])\n plt.xlabel('weekday'); plt.ylabel('count'); pdf.savefig()\n\n # BAR ----- GENDER BY WEEKDAY\n plt.figure() \n fig = pd.concat(\n [d[['weekday', 'gender']].pivot_table(index=['weekday'], columns=col, aggfunc=len) for col in ['gender']], axis = 1\n ).fillna(0).plot(kind = 'bar', rot = 35)\n\n plt.title('Usage by gender and weekday')\n plt.xticks(np.arange(len(plt.gca().get_xticklabels())), [_weekdays[int(i.get_text())] for i in plt.gca().get_xticklabels()])\n plt.xlabel('weekday'); plt.ylabel('count'); pdf.savefig()\n\n # BAR ----- Dialog time by weekday\n plt.figure()\n fig = d[['weekday', 'dialog_time']].groupby('weekday').mean().plot(kind='bar', rot=35)\n plt.title('Mean dialog time by weekday')\n plt.xticks(np.arange(len(plt.gca().get_xticklabels())), [_weekdays[int(i.get_text())] for i in 
plt.gca().get_xticklabels()])\n plt.xlabel('weekday'); plt.ylabel('dialog time in minutes'); pdf.savefig()\n\n plt.figure()\n fig = pd.concat(\n [d_use_case[['weekday', 'use_case']].pivot_table(index=['weekday'], columns=col, aggfunc=len) for col in ['use_case']],axis = 1\n ).fillna(0).plot(kind = 'bar', rot = 35)\n\n plt.title('Use case by weekday')\n plt.xticks(np.arange(len(plt.gca().get_xticklabels())), [_weekdays[int(i.get_text())] for i in plt.gca().get_xticklabels()])\n plt.xlabel('weekday')\n plt.ylabel('count')\n pdf.savefig()\n\n # ----- ----- ------ ----- ----- ----- ----- ----- ----- ------ ----- ----- ----- ----- ----- ----- ------ ----- ----- ----- ----- \n\n # SCATTER ----- LINEAR REGRESSION -----\n # maybe not representative enough for 7 day period\n \n # data = emotion_states_df[['distance', 'age', 'gender', 'basic_emotion', 'pleasure_state', 'excitement_state', 'smile_state', 'dialog_time']]\n # X = data.iloc[:, 1].values.reshape(-1, 1).astype('int') # age\n # Y = data.iloc[:, -1].values.reshape(-1, 1).astype('float32') # dialog_time\n # linear_regressor = LinearRegression(); linear_regressor.fit(X, Y) \n # Y_pred = linear_regressor.predict(X) \n\n # plt.figure()\n # fig = plt.scatter(X, Y)\n # plt.plot(X, Y_pred, color='red')\n # plt.xlabel('age'); plt.ylabel('dialog time')\n # plt.title('Linear regression on dialog time and age')\n # pdf.savefig()\n\n\ndef main() -> None:\n init()\n\n with PdfPages(f'{out_dir}/{out_fname}') as pdf:\n plots(pdf)\n \n # metadata\n d = pdf.infodict()\n d['Title'] = f'Weekly report {start_date} - {now}'\n d['Author'] = 'Team Pepper'\n # d['Subject'] = '....'\n d['Keywords'] = 'pepper robot matplotlib plots report'\n d['CreationDate'] = now.strftime('%Y-%m-%d')\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
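The weekly-report script above hinges on matplotlib's PdfPages backend: every pdf.savefig(...) appends one figure as a page (with no argument it takes the current figure, which is why the script's mixed savefig() / savefig(fig) calls both work), and pdf.infodict() fills the PDF metadata. A minimal sketch of that pattern with invented data; the Agg backend selection is an assumption for headless use:

import matplotlib
matplotlib.use("Agg")  # assumption: render off-screen, as a cron job on a server would
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

with PdfPages("report.pdf") as pdf:
    fig, ax = plt.subplots()
    ax.bar(["Mon", "Tue", "Wed"], [3, 5, 2])
    ax.set_title("Example page")
    pdf.savefig(fig)       # each savefig() call appends one page to the PDF
    plt.close(fig)

    meta = pdf.infodict()  # same metadata hook the script uses in main()
    meta["Title"] = "Example report"
    meta["Author"] = "Example author"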
tutu96177/mult_regression
|
[
"6c8f48f24f1a8c3cb0e8d3446c4a1f9404acdb64"
] |
[
"core/data_process.py"
] |
[
"# -*- coding: utf-8 -*-#\n\n#-------------------------------------------------------------------------------\n# Name: HLK-20C02(4℃)\n# Description: \n# Author: shichao\n# Date: 2019/7/17\n#-------------------------------------------------------------------------------\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport time\nimport datetime\n\n# 核心代码,设置显示的最大列、宽等参数,消掉打印不完全中间的省略号\npd.set_option('display.max_columns', 1000)\npd.set_option('display.width', 1000)\npd.set_option('display.max_colwidth', 1000)\n\n\n# 读取数据\ndef read_df(file_name):\n file_dir = './raw_data/'\n file_path = os.path.join(file_dir, file_name)\n file_path = open(file_path)\n df_file = pd.read_csv(file_path)\n df_file['DateTime'] = pd.to_datetime(df_file['DateTime'])\n df_file = df_file.sort_values(by='DateTime')\n return df_file\n\n\n# 对 x 增加特征:date_ymd 年月日、把年月日时分秒转化为秒数utc时间\ndef add_features(df_file):\n date_list = []\n for date in list(df_file['DateTime']):\n date_str = str(date).split(' ')[0]\n date_list.append(date_str)\n df_file['date_ymd'] = date_list\n time_stamp_list = []\n for time_stamp in list(df_file['DateTime']):\n time_s = time.mktime(time.strptime(str(time_stamp), '%Y-%m-%d %H:%M:%S'))\n # time_s = time.mktime(time.strptime(time_stamp, '%Y/%m/%d %H:%M:%S'))\n time_stamp_list.append(time_s)\n df_file['time_stamp'] = time_stamp_list\n # date_ymdh_list = []\n # for time_stamp in list(df_file['DateTime']):\n # date_ymdh = str(time_stamp).split(':')[0]\n # date_ymdh_list.append(date_ymdh)\n # df_file['date_ymdh'] = date_ymdh_list\n return df_file\n\n\n\n# 画图:补全缺失值后,画图与原图比较\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\ndef draw_plot(dataset_12_plot):\n #date_time = 'date_ymd'\n temperature = 'Temperature'\n date_ymd = 'date_ymd'\n plt.figure(1, figsize=(26, 13))\n # 获取坐标轴\n ax = plt.gca()\n #plt.plot(dataset_12_plot[date_time], dataset_12_plot[temperature], 'red', marker='o')\n plt.plot(dataset_12_plot[temperature], 'red', marker='o')\n for label in ax.get_xticklabels():\n # 横轴标签旋转 30°\n label.set_rotation(30)\n label.set_horizontalalignment('right')\n # 显示图例\n plt.legend(loc='upper left')\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S')) # 设置时间标签显示格式\n ax.xaxis.set_major_locator(mdates.HourLocator()) # X轴的间隔为小时\n png_dir = './repair_png/'\n date_ymd = str(dataset_12_plot[date_ymd][1])\n png_path = os.path.join(png_dir, date_ymd + '_' + str(len(dataset_12_plot)) + '.png')\n plt.savefig(png_path)\n plt.show()\n\n\n# 对缺失的温度进行插补\ndef repair_tem(df_data_by_date, sample_frequency):\n \"\"\"\n :param df_data_by_date:\n :param sample_frequency: 采样频率\n :return:\n \"\"\"\n # 去除重复列,默认所有列无重复记录\n #df_data_by_date.duplicated()\n df_data_by_date = df_data_by_date.reset_index(drop=True)\n term_list_1 = list(df_data_by_date['Temperature'])\n term_list_date = list(df_data_by_date['date_ymd'])\n n = len(term_list_1)\n date_list = []\n temp_temp_list = []\n # 采样频率\n time_n = 3 # 时间间隔, 3 分钟\n for i in range(n):\n if (i >= 0 and i + 1 <= n - 1):\n temp_temp_list.append(term_list_1[i])\n date_list.append(term_list_date[i])\n # 对中间缺失的温度值进行插补\n if (df_data_by_date.loc[i + 1]['time_stamp'] - df_data_by_date.loc[i]['time_stamp'] >= (sample_frequency + time_n) * 60):\n #n_temp = int(np.ceil((df_data_by_date.loc[i + 1]['time_stamp'] - df_data_by_date.loc[i]['time_stamp']) / (sample_frequency * 60.0)))\n # 四舍五入取整\n n_temp = int(((df_data_by_date.loc[i + 1]['time_stamp'] - df_data_by_date.loc[i]['time_stamp']) / (sample_frequency * 60.0)) + 0.5)\n for j in range(n_temp - 1):\n 
temp_temp = (df_data_by_date.loc[i + 1]['Temperature'] + df_data_by_date.loc[i]['Temperature']) / 2\n temp_temp_list.append(temp_temp)\n date_list.append(term_list_date[-1])\n temp_temp_list.append(term_list_1[-1])\n date_list.append(term_list_date[-1])\n # 如果开始连续缺失数量少于 30%, 用均值补齐\n df_data_by_date = df_data_by_date.reset_index(drop=True)\n #date_ = term_list_date[1]\n # 看是否中间补全\n if(len(temp_temp_list) < int(24*60/sample_frequency)):\n # 开头缺失\n continue_list = []\n time_s = time.mktime(time.strptime(str(term_list_date[1]), '%Y-%m-%d')) # 当天开始时间 0 时 0 分 0 秒\n if(df_data_by_date.loc[0]['time_stamp'] - time_s > (sample_frequency + time_n) * 60):\n # 开头缺失\n n_temp = int(np.ceil((df_data_by_date.loc[0]['time_stamp'] - time_s) / (sample_frequency * 60.0)))\n for j in range(n_temp - 1):\n #for j in range(int(24*60/sample_frequency) - len(term_list_1)):\n continue_list.append(round(np.mean(term_list_1), 2))\n date_list.append(term_list_date[-1])\n continue_list.extend(temp_temp_list)\n temp_temp_list = continue_list\n # 结尾缺失\n # 获取下一天的日期\n date_end = pd.to_datetime(term_list_date[1]) + datetime.timedelta(days=1)\n time_end = time.mktime(time.strptime(str(date_end), '%Y-%m-%d %X'))\n if(time_end - df_data_by_date.loc[len(df_data_by_date)-1]['time_stamp'] >= (sample_frequency + time_n) * 60):\n # 结尾缺失\n for j in range(int(24*60/sample_frequency) - len(term_list_1)):\n continue_list.append(round(np.mean(term_list_1), 2))\n date_list.append(term_list_date[-1])\n temp_temp_list.extend(continue_list)\n df_repair = pd.DataFrame()\n df_repair['date_ymd'] = date_list\n df_repair['Temperature'] = temp_temp_list\n return df_repair\n\n\n\n# 对温度做分段常数逼近处理,下采样\ndef constant_appro_low(df_data_by_date_tem):\n df_data_by_date_tem = df_data_by_date_tem.reset_index(drop=True)\n df_appro = pd.DataFrame()\n date_index = pd.date_range(end = '01/01/2019', periods=len(df_data_by_date_tem), freq='D')\n temperature = 'Temperature'\n date_ymd = 'date_ymd'\n df_appro[temperature] = df_data_by_date_tem[temperature]\n df_appro.index = date_index\n # 下采样,取均值\n df_appro_low = pd.DataFrame()\n # 一个小时聚合一次常值\n df_appro_low[temperature] = df_appro[temperature].resample(rule='6D').mean()\n #date_list = df_data_by_date_tem.loc[:len(df_appro_low)-1][date_ymd]\n #df_appro_low[date_ymd] = list(date_list)\n # 差分,做一阶差分\n df_appro_diff = pd.DataFrame()\n df_appro_diff[temperature] = df_appro_low.loc[:][temperature].diff(1) # 1 阶差分\n df_appro_diff[date_ymd] = list(df_data_by_date_tem.loc[:len(df_appro_diff)-1][date_ymd])\n df_appro_diff = df_appro_diff.dropna()\n df_appro_diff = df_appro_diff.reset_index(drop=True)\n df_appro_diff[temperature] = df_appro_diff[temperature].apply(lambda x: round(x, 2))\n return df_appro_diff\n\n\n\n\n# 对 x 进行特征提取,\nfrom tsfresh import extract_relevant_features\nfrom tsfresh import extract_features\nimport tsfresh as tsf\ndef get_features(df_appro):\n #extracted_features = extract_features(df_appro, column_id='date_ymd')\n #ts = pd.Series(x) # 数据x假设已经获取\n ts = df_appro['Temperature']\n # 一阶差分绝对和\n abs_sum = tsf.feature_extraction.feature_calculators.absolute_sum_of_changes(ts)\n abs_sum = round(abs_sum, 2)\n # 各阶自相关系数的聚合统计特征\n param_statis = [{'f_agg': 'mean', 'maxlag': 2}]\n diff_statis = tsf.feature_extraction.feature_calculators.agg_autocorrelation(ts, param_statis)\n diff_statis = diff_statis[0][1]\n diff_statis = round(diff_statis, 2)\n # ADF 检测统计值\n param_adf = [{'attr': 'pvalue'}]\n adf = tsf.feature_extraction.feature_calculators.augmented_dickey_fuller(ts, param_adf)\n adf = adf[0][1]\n adf = 
round(adf, 2)\n # 峰度\n peak = tsf.feature_extraction.feature_calculators.kurtosis(ts)\n peak = round(peak, 2)\n # 时序数据复杂度\n complexity = tsf.feature_extraction.feature_calculators.cid_ce(ts, True)\n complexity = round(complexity, 2)\n # 线性回归分析\n param_line = [{'attr': 'pvalue'}]\n line = tsf.feature_extraction.feature_calculators.linear_trend(ts, param_line)\n line = list(zip(line))[0][0][1]\n line = round(line, 2)\n # 分组熵\n bin_entropy = tsf.feature_extraction.feature_calculators.binned_entropy(ts, 10)\n bin_entropy = round(bin_entropy, 2)\n # 近似熵\n appro_entropy = tsf.feature_extraction.feature_calculators.approximate_entropy(ts, 6, 0.1)\n appro_entropy = round(appro_entropy, 2)\n # 傅里叶变换频谱统计量\n param_fly = [{'aggtype': 'skew'}]\n fly = tsf.feature_extraction.feature_calculators.fft_aggregated(ts, param_fly)\n fly = list(zip(fly))[0][0][1]\n fly = round(fly, 2)\n # 傅里叶变换系数\n param_fly_change = [{'coeff': 2, 'attr': 'angle'}]\n fly_change = tsf.feature_extraction.feature_calculators.fft_coefficient(ts, param_fly_change)\n fly_change = list(zip(fly_change))[0][0][1]\n fly_change = round(fly_change, 2)\n # 小坡变换\n param_cwt = [{'widths': tuple([2, 2, 2]), 'coeff': 2, 'w': 2}]\n cwt = tsf.feature_extraction.feature_calculators.cwt_coefficients(ts, param_cwt)\n cwt = list(zip(cwt))[0][0][1]\n cwt = round(cwt, 2)\n return abs_sum, adf, peak, complexity, line, bin_entropy, appro_entropy, fly, fly_change, cwt\n\n\n\n# 对每天的温度进行特征提取, 分段特征; 统计特征;熵特征; 第 3 阶段\ndef get_features_everday(df_data, sample_frequence):\n date_ymd_str = 'date_ymd'\n temperature = 'Temperature'\n date_ymd_field = df_data[date_ymd_str]\n date_ymds = []\n for i in date_ymd_field:\n if i not in date_ymds:\n date_ymds.append(i)\n df_features = pd.DataFrame()\n for date_ymd in date_ymds:\n # date_ymd = '2019-04-17'\n abs_sum_list = []\n adf_list = []\n peak_list = []\n complexity_list = []\n line_list = []\n bin_entropy_list = []\n appro_entropy_list = []\n fly_list = []\n fly_change_list = []\n cwt_list = []\n date_ymd_list = []\n df_data_by_date = df_data[df_data[date_ymd_str] == date_ymd]\n # 删除重复记录\n df_data_by_date = df_data_by_date.drop_duplicates()\n # 缺失值大于 30% 的直接舍弃\n #sample_frequence = 10 # 采样频率是 10 min\n data_num = int((24*60)/sample_frequence)\n abandon_percent = 0.3\n abondon_thr = int(data_num * (1 - abandon_percent))\n if (len(df_data_by_date) <= abondon_thr):\n continue\n # # 将异常温度升高的波峰的温度值用当天的温度均值做替换, 此台设备没有可替换的异常温度值\n # df_data_by_date = replace_abnorm_temp(df_data_by_date)\n # 用插值补全温度值\n df_data_by_date_tem = repair_tem(df_data_by_date, sample_frequency)\n # draw_plot(df_data_by_date_tem)\n # 如果是连续性缺失,则舍弃\n if (len(df_data_by_date_tem) <= abondon_thr):\n continue\n # 分段常数逼近,重采样,下采样,df_data_by_date_tem.resample()\n df_appro = constant_appro_low(df_data_by_date_tem)\n temp_array = np.array(list(df_appro[temperature]))\n # 分段常数逼近特征列名: 23 个 , 每 60 分钟取一次均值\n columns_name = ['t' + str(x) for x in range(0, len(temp_array))]\n temp_array = temp_array.reshape(1, len(columns_name))\n df_x = pd.DataFrame(temp_array, columns=columns_name)\n # thresh 提取特征\n abs_sum, adf, peak, complexity, line, bin_entropy, appro_entropy, fly, fly_change, cwt = get_features(df_appro)\n abs_sum_list.append(abs_sum)\n adf_list.append(adf)\n peak_list.append(peak)\n complexity_list.append(complexity)\n line_list.append(line)\n bin_entropy_list.append(bin_entropy)\n appro_entropy_list.append(appro_entropy)\n fly_list.append(fly)\n fly_change_list.append(fly_change)\n cwt_list.append(cwt)\n date_ymd_list.append(date_ymd)\n # 统计特征列名: 8\n 
df_x['abs_sum'] = abs_sum_list\n df_x['adf'] = adf_list\n df_x['peak'] = peak_list\n df_x['complexity'] = complexity_list\n df_x['line'] = line_list\n df_x['fly'] = fly_list\n df_x['fly_change'] = fly_change_list\n df_x['cwt'] = cwt_list\n # 信息熵特征:数据片段相似性 2 个\n df_x['bin_entropy'] = bin_entropy_list\n df_x['appro_entropy'] = appro_entropy_list\n df_x[date_ymd_str] = date_ymd_list\n df_features = pd.concat([df_features, df_x], axis=0, sort=False)\n return df_features\n\n\n\n\n# 提取目标 y : 之后手动去确认一天的正常除霜时间和跨夜的除霜时间:\n# correct 栏位是人工确认白天的除霜时间,accross 栏位是跨夜的除霜时间\n# correct 和 accross 是人工添加的两个栏位,待确认整理后,才可进行下一步\ndef get_targets(file_name_):\n file_label_dir = './label_data/'\n file_label_path = os.path.join(file_label_dir, file_name_ + '.csv')\n file_label_path = open(file_label_path)\n df_file = pd.read_csv(file_label_path)\n if 'Unnamed: 5' in df_file.columns:\n del df_file['Unnamed: 5']\n df_file['hum_label'] = df_file['hum_label'].astype(str)\n df_file_label = df_file[df_file['hum_label']=='1.0']\n label_preprocess_dir = 'targets/'\n file_name_label = file_name_ + '_label' + '.csv'\n label_preprocess_path = os.path.join(label_preprocess_dir, file_name_label)\n #df_file_label.to_csv(label_preprocess_path, index=False)\n return df_file_label\n\n\n# 给 label y 增加辅助列\ndef add_y_col(df_file):\n date_list = []\n for date in list(df_file['DateTime']):\n date_str = str(date).split(' ')[0]\n date_list.append(date_str)\n df_file['date_ymd'] = date_list\n return df_file\n\n\n# 对 y 进行补全、筛选处理:\n# 将白天和跨夜的除霜时间进行整理,按照时间顺序进行排序。手动确认日期。\ndef complete_targets(file_name_label, n_cnt):\n \"\"\"\n :param file_name_label: 挑出的 y 标签\n :param n_cnt: 除霜次数\n :return:\n \"\"\"\n label_dir = './targets/'\n label_path = os.path.join(label_dir, file_name_label)\n label_path = open(label_path)\n df_label = pd.read_csv(label_path)\n df_label['correct'] = df_label['correct'].astype(str)\n #df_label['across'] = df_label['across'].astype(str)\n df_laebl_temp_1 = df_label[df_label['correct']=='1.0']\n #df_laebl_temp_2 = df_label[df_label['across']=='1.0']\n # 对 label y 进行整理\n df_laebl_temp_1 = df_laebl_temp_1.reset_index(drop=True)\n #df_laebl_temp_2 = df_laebl_temp_2.reset_index(drop=True)\n df_laebl_temp_1 = add_y_col(df_laebl_temp_1)\n #df_laebl_temp_2 = add_y_col(df_laebl_temp_2)\n # 保存在 ./targets/ 下人工检查日期: 处理跨夜的日期\n save_dir = './targets/'\n save_path_1 = os.path.join(save_dir, file_name_label.split('.')[0] + '_1.csv')\n #df_laebl_temp_1.to_csv(save_path_1, index=False)\n save_path_2 = os.path.join(save_dir, file_name_label.split('.')[0] + '_2.csv')\n #df_laebl_temp_2.to_csv(save_path_2, index=False)\n print ()\n\n\n# 把 label y 按日期变成一行\ndef label_to_hor(df_data, n_cnt):\n df_data = df_data.reset_index(drop=True)\n date_ymd_field = df_data['date_ymd']\n date_ymds = []\n for i in date_ymd_field:\n if i not in date_ymds:\n date_ymds.append(i)\n df_features = pd.DataFrame()\n for date_ymd in date_ymds:\n date_list = []\n df_data_by_date = df_data[df_data['date_ymd'] == date_ymd]\n date_list.append(date_ymd)\n temp_array = np.array(list(df_data_by_date['DateTime']))\n temp_array = temp_array.reshape(1, n_cnt*2)\n df_y = pd.DataFrame(temp_array)\n df_y['date_ymd'] = date_list\n df_features = pd.concat([df_features, df_y], axis=0)\n return df_features\n\n\n\n# 对 y 进行合并排序,将人工手动的跨夜和白天除霜数据整理为一个目标文件。修改 date_ymd 栏位\ndef combine_label(file_name_, n_cnt):\n file_name_label_1 = file_name_ + '_label_1.csv'\n label_dir = './targets/'\n label_path_1 = os.path.join(label_dir, file_name_label_1)\n label_path_1 = open(label_path_1)\n df_label_1 = 
pd.read_csv(label_path_1)\n #file_name_label_2 = file_name_ + '_label_2.csv'\n #label_path_2 = os.path.join(label_dir, file_name_label_2)\n #label_path_2 = open(label_path_2)\n #df_label_2 = pd.read_csv(label_path_2)\n # 检查日期,人工检查\n import collections\n a = collections.Counter(df_label_1['date_ymd'])\n #b = collections.Counter(df_label_2['date_ymd'])\n # 把 label y 按照每天日期排列成一行\n df_label_y_1 = label_to_hor(df_label_1, n_cnt)\n #df_label_y_2 = label_to_hor(df_label_2, n_cnt)\n #df_y = pd.concat([df_label_y_1, df_label_y_2], axis=0)\n df_y = pd.concat([df_label_y_1], axis=0)\n df_y['date_ymd'] = pd.to_datetime(df_y['date_ymd'])\n df_y = df_y.sort_values(by='date_ymd')\n #df_y.to_csv(label_dir + file_name_label_1.split('_')[0] + '_y.csv', index=False)\n print()\n\n\n\n# 整合 x 和 y, 与匹配的日期对应起来, 对应产出在 label_preprocess_2/ 文件夹下\ndef merge_features_target(file_name):\n features_dir = './features/'\n target_dir = './targets/'\n features_path = os.path.join(features_dir, file_name + '_x_1.csv') # 第 2 阶段\n features_path = open(features_path)\n target_path = os.path.join(target_dir, file_name + '_y.csv')\n target_path = open(target_path)\n df_features = pd.read_csv(features_path)\n df_target = pd.read_csv(target_path)\n # 添加辅助列 date_ymd_1: 为了匹配用昨天的温度预测下一天的时间\n df_target['date_ymd'] = pd.to_datetime(df_target['date_ymd'])\n df_target = df_target.sort_values(by='date_ymd')\n # 获取当前日期前一天日期\n before_days = 1\n #before_days_date = now_date + datetime.timedelta(days=-before_days)\n df_target['date_ymd'] = df_target['date_ymd'].apply(lambda x: x + datetime.timedelta(days=-before_days))\n\n df_target['date_ymd'] = df_target['date_ymd'].astype(str)\n # 时间字符串格式转换\n df_features['date_ymd'] = pd.to_datetime(df_features['date_ymd'])\n df_features = df_features.sort_values(by='date_ymd')\n df_features['date_ymd'] = df_features['date_ymd'].astype(str)\n # 合并 x 和 y\n df_data = pd.merge(df_features, df_target, on='date_ymd')\n data_merge_dir = './fea_tar_data/'\n data_merge_path = os.path.join(data_merge_dir, file_name + '_feature_target_1.csv') # 第 1 阶段 x 和 y 合并后的成果\n del df_data['date_ymd']\n # df_data.to_csv(data_merge_path, index=False)\n print ()\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n # 处理特征 X\n file_name = 'HLK-20C02(4℃).csv'\n file_name_ = os.path.splitext(file_name)[0]\n # # 读取数据\n # df_file = read_df(file_name)\n # # 增加辅助列\n # df_file = add_features(df_file)\n # # 获取 x 特征\n # sample_frequency = 10 # 采样频率 10 min\n # df_features = get_features_everday(df_file, sample_frequency) # 第 1 阶段\n # # df_features.to_csv('./features/' + file_name_ +'_x_1.csv', index=False) # 保存第 1 阶段 x\n # 提取 y: 人工确认当天除霜时间和跨夜除霜时间。\n df_file_y = get_targets(file_name_)\n # 处理标签 Y :人工手动整理跨夜除霜数据\n file_name_label = file_name_ + '_label' + '.csv'\n n_cnt = 5 # 除霜次数\n complete_targets(file_name_label, n_cnt)\n # 将手动整理的跨夜和白天除霜时间,根据日期整合一个文件 y\n # combine_label(file_name_, n_cnt)\n # 整合 x 和 y\n merge_features_target(file_name_)\n\n print ()\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"pandas.to_datetime",
"pandas.concat",
"pandas.merge",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.dates.HourLocator",
"numpy.ceil",
"numpy.mean",
"pandas.set_option",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
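core/data_process.py calls tsfresh's low-level feature_calculators directly on a pandas Series instead of going through the extract_features pipeline. Parametrized calculators return a list of (name, value) pairs, which is why the original unpacks results like diff_statis[0][1]. A minimal sketch of that direct-call style on a toy series (values invented):

import pandas as pd
import tsfresh.feature_extraction.feature_calculators as fc

ts = pd.Series([1.0, 1.2, 0.9, 1.5, 1.1, 0.8, 1.3])

abs_sum = fc.absolute_sum_of_changes(ts)  # sum of |x_{t+1} - x_t|, a plain scalar
entropy = fc.binned_entropy(ts, 10)       # scalar, as used in get_features()
acf = fc.agg_autocorrelation(ts, [{"f_agg": "mean", "maxlag": 2}])  # [(name, value)]

print(round(abs_sum, 2), round(entropy, 2), round(acf[0][1], 2))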
RyanMoussouni/iSeeBetter
|
[
"af193ae0852f8e477fcd6875dce874eb5092a24a"
] |
[
"SRGAN/model.py"
] |
[
"import math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass Generator(nn.Module):\n def __init__(self, scale_factor):\n upsample_block_num = int(math.log(scale_factor, 2))\n\n super(Generator, self).__init__()\n self.block1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=9, padding=4),\n nn.PReLU()\n )\n self.block2 = ResidualBlock(64)\n self.block3 = ResidualBlock(64)\n self.block4 = ResidualBlock(64)\n self.block5 = ResidualBlock(64)\n self.block6 = ResidualBlock(64)\n self.block7 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.PReLU()\n )\n block8 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]\n block8.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))\n self.block8 = nn.Sequential(*block8)\n\n def forward(self, x):\n block1 = self.block1(x)\n block2 = self.block2(block1)\n block3 = self.block3(block2)\n block4 = self.block4(block3)\n block5 = self.block5(block4)\n block6 = self.block6(block5)\n block7 = self.block7(block6)\n block8 = self.block8(block1 + block7)\n\n return (F.tanh(block8) + 1) / 2\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.net = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, padding=1),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(128, 256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(256, 512, kernel_size=3, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2),\n\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(512, 1024, kernel_size=1),\n nn.LeakyReLU(0.2),\n nn.Conv2d(1024, 1, kernel_size=1)\n )\n\n def forward(self, x):\n batch_size = x.size(0)\n return torch.sigmoid(self.net(x).view(batch_size))\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, channels):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)\n self.bn1 = nn.BatchNorm2d(channels)\n self.prelu = nn.PReLU()\n self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)\n self.bn2 = nn.BatchNorm2d(channels)\n\n def forward(self, x):\n residual = self.conv1(x)\n residual = self.bn1(residual)\n residual = self.prelu(residual)\n residual = self.conv2(residual)\n residual = self.bn2(residual)\n\n return x + residual\n\n\nclass UpsampleBLock(nn.Module):\n def __init__(self, in_channels, up_scale):\n super(UpsampleBLock, self).__init__()\n self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)\n self.pixel_shuffle = nn.PixelShuffle(up_scale)\n self.prelu = nn.PReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pixel_shuffle(x)\n x = self.prelu(x)\n return x\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.PReLU",
"torch.nn.Conv2d",
"torch.nn.PixelShuffle",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.functional.tanh"
]
] |
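SRGAN/model.py upsamples with sub-pixel convolution: in UpsampleBLock a 3x3 convolution multiplies the channel count by up_scale**2, then nn.PixelShuffle rearranges those channels into spatial resolution. A minimal standalone sketch of the shape arithmetic (sizes invented):

import torch
from torch import nn

up_scale = 2
block = nn.Sequential(
    nn.Conv2d(64, 64 * up_scale ** 2, kernel_size=3, padding=1),
    nn.PixelShuffle(up_scale),  # (N, 256, H, W) -> (N, 64, 2H, 2W)
    nn.PReLU(),
)

x = torch.randn(1, 64, 24, 24)
print(block(x).shape)  # torch.Size([1, 64, 48, 48])

One side note on the generator: F.tanh, used in Generator.forward, is deprecated in recent PyTorch releases in favor of torch.tanh; the surrounding (F.tanh(block8) + 1) / 2 simply rescales the output into [0, 1].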
Ivancaminal72/mcv-m6-2018-team3
|
[
"dcdbc97d6d9534f1c0479e98113f35bca0084d86"
] |
[
"Week4/utils.py"
] |
[
"import glob\nimport os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef rgb2gray(rgb):\n r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n\ndef video_to_frame(filename, grayscale=True):\n vidcap = cv2.VideoCapture(filename)\n # Check if camera opened successfully\n if not vidcap.isOpened():\n print(filename)\n print(\"Error opening video stream or file\")\n exit(1)\n frames_vol=[]\n while vidcap.isOpened():\n ret, frame = vidcap.read()\n if type(frame) == type(None):\n break\n if grayscale: frame= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frames_vol.append(frame)\n return np.array(frames_vol)\n\ndef load_data(data_path, data_id, seq_range=None, grayscale=True):\n X = []\n y = []\n\n path = os.path.join(data_path, data_id)\n\n if seq_range is None: seq_range = [0, len(glob.glob(path + '/groundtruth/*'))]\n\n for index in range(seq_range[0], seq_range[1] + 1):\n in_name = path + '/input/in' + str(index).zfill(6) + '.jpg'\n gt_name = path + '/groundtruth/gt' + str(index).zfill(6) + '.png'\n\n if grayscale:\n in_image = cv2.imread(in_name, 0)\n gt_image = cv2.imread(gt_name, 0)\n else:\n in_image = cv2.imread(in_name)\n gt_image = cv2.imread(gt_name, 0)\n\n X.append(in_image)\n y.append(gt_image)\n\n return np.array(X), np.array(y)\n\n\n# INPUT: X: is a sequence of images, path: directory to save images.\ndef write_images(X, path, head_filename):\n\n path = os.path.join(path, head_filename)\n\n for i in range(X.shape[0]):\n plt.figure()\n filename = path + str(i).zfill(6) + '.png'\n plt.imshow(X[i], cmap=\"gray\")\n plt.savefig(filename)\n plt.close()\n\n return\n\ndef write_images2(X, path, head_filename):\n\n path = os.path.join(path, head_filename)\n\n for i in range(X.shape[0]):\n filename = path + str(i).zfill(6) + '.png'\n cv2.imwrite(filename, X[i])\n return\n\ndef simplify_labels(y):\n aux = np.ones(y.shape) * np.nan\n aux[np.where(y == 0)] = 1\n aux[np.where(y == 50)] = 1\n return aux\n\ndef build_mask(y):\n # Convert ground truth to mask\n mask = np.ones(y.shape)\n mask[np.where(y == 0)] = 0\n mask[np.where(y == 50)] = 0\n mask[np.where(y == 85)] = np.nan\n mask[np.where(y == 170)] = np.nan\n\n return mask\n\ndef fit(X, y):\n idx = simplify_labels(y)\n mean_map = np.nanmean(X * idx, axis=0)\n var_map = np.nanvar(X * idx, axis=0)\n\n return np.array([mean_map, var_map])\n\ndef predict(X, background_model, alpha):\n mean_map = background_model[0]\n var_map = background_model[1]\n\n prediction = np.zeros(X.shape)\n prediction[np.absolute(X - mean_map) >= alpha * (var_map + 2)] = 1\n\n return prediction\n\n\ndef pixel_evaluation(predictions, ground_truth):\n\n\n # ground_truth: first call build_mask\n idx = np.where(~ np.isnan(ground_truth))\n ground_truth = ground_truth[idx]\n predictions = predictions[idx]\n\n\n TP = len(np.where(ground_truth[np.where(predictions == 1)] == 1)[0])\n FP = len(np.where(ground_truth[np.where(predictions == 1)] == 0)[0])\n\n FN = len(np.where(ground_truth[np.where(predictions == 0)] == 1)[0])\n TN = len(np.where(ground_truth[np.where(predictions == 0)] == 0)[0])\n\n TF = len(np.where(ground_truth == 1)[0])\n\n return np.array([TP, TN, FP, FN, TF])\n\n\ndef precision(pe):\n return pe[0] / (pe[0] + pe[2])\n\n\ndef recall(pe):\n return pe[0] / (pe[0] + pe[3])\n\n\ndef f1_score(pe):\n return 2 * pe[0] / (2 * pe[0] + pe[2] + pe[3])\n\ndef fpr_metric(pe):\n return pe[2]/(pe[2]+pe[1])\n\ndef tpr_metric(pe):\n return pe[0] / (pe[0] + pe[3])\n\ndef write_video(sequence, output_path):\n 
height, width = sequence[0].shape\n fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n video = cv2.VideoWriter(filename=output_path, fourcc=fourcc, fps=25 ,frameSize=(height, width), isColor=0)\n for frame in sequence:\n print(frame.shape)\n video.write(frame)\n video.release()\n\ndef MOG2(X_pred):\n\n fgbgMOG2 = cv2.createBackgroundSubtractorMOG2()\n\n shadowMOG = np.zeros(X_pred.shape)\n for idx, frame in enumerate(X_pred):\n\n shadow=fgbgMOG2.apply(frame)\n shadowMOG[idx][shadow == 127] = 1\n\n return shadowMOG\n\ndef visual_of(im, gtx, gty, gtz, wsize=300, mult=1, thickness=1):\n step = int(wsize)\n mwsize = int(wsize / 2)\n h,w = gtx.shape\n\n for i in np.arange(-mwsize,h+1-mwsize,step):\n for j in np.arange(-mwsize,w+1-mwsize,step):\n ai,bi,aj, bj = getCoords(i, j, wsize, h, w)\n mask = gtz[ai:bi,aj:bj]\n if np.count_nonzero(mask) == 0:\n continue\n winx = gtx[ai:bi,aj:bj]\n winy = gty[ai:bi,aj:bj]\n glob_x = winx[0,0]*mult\n glob_y = winy[0,0]*mult\n pt1 = (int(j + mwsize), int(i + mwsize))\n pt2 = (int(j + mwsize + glob_x), int(i + mwsize + glob_y))\n color = (0, 255, 0)\n im = cv2.arrowedLine(im, pt1, pt2, color, thickness)\n return im\n\ndef getCoords(i,j,w_size,h,w):\n if i<0:\n ai=0\n else:\n ai=i\n\n if j<0:\n aj=0\n else:\n aj=j\n\n if i+w_size>=h:\n bi=h-1\n else:\n bi=i+w_size\n\n if j+w_size>=h:\n bj=w-1\n else:\n bj=j+w_size\n\n return ai, bi, aj, bj"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.absolute",
"numpy.isnan",
"numpy.arange",
"numpy.nanvar",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.nanmean",
"matplotlib.pyplot.close",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.figure"
]
] |
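The fit()/predict() pair in Week4/utils.py is a per-pixel Gaussian background model: fit() stores nanmean/nanvar maps over background-labelled frames, and predict() flags pixels whose absolute deviation from the mean exceeds alpha * (variance + 2). A minimal sketch on synthetic frames:

import numpy as np

rng = np.random.default_rng(0)
frames = rng.normal(loc=100.0, scale=2.0, size=(50, 8, 8))  # background-only frames

# fit: stack per-pixel mean and variance maps, like the original's background_model
model = np.array([frames.mean(axis=0), frames.var(axis=0)])

# predict: threshold the absolute deviation, as in predict()
test = frames[0].copy()
test[2:4, 2:4] += 40.0  # inject a 2x2 foreground blob
alpha = 2.5
foreground = np.absolute(test - model[0]) >= alpha * (model[1] + 2)
print(int(foreground.sum()))  # the blob pixels are flagged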
Thenerdstation/mpi4jax
|
[
"8e2fa86abcf6e775d1acea1b85fe44d15ff57387"
] |
[
"mpi4jax/_src/collective_ops/sendrecv.py"
] |
[
"import numpy as _np\nfrom mpi4py import MPI as _MPI\n\nfrom jax import abstract_arrays, core\nfrom jax.core import Primitive\nfrom jax.interpreters import ad, xla, batching\nfrom jax.lax import create_token\nfrom jax.lib import xla_client\n\nfrom ..utils import (\n HashableMPIType,\n default_primitive_impl,\n to_dtype_handle,\n to_mpi_handle,\n to_mpi_ptr,\n unpack_hashable,\n wrap_as_hashable,\n xla_constant_intc,\n xla_constant_uintptr,\n)\nfrom ..decorators import translation_rule_cpu, translation_rule_gpu\nfrom ..validation import enforce_types\nfrom ..comm import get_default_comm\nfrom ..jax_compat import Tracer, Token\n\n# The Jax primitive\nmpi_sendrecv_p = Primitive(\"sendrecv_mpi\") # Create the primitive\nmpi_sendrecv_impl = default_primitive_impl(mpi_sendrecv_p)\n\n\n# This function applies the primitive to an AST\n@enforce_types(\n source=_np.integer,\n dest=_np.integer,\n sendtag=_np.integer,\n recvtag=_np.integer,\n comm=(type(None), _MPI.Intracomm, HashableMPIType),\n status=(type(None), _MPI.Status, HashableMPIType),\n token=(type(None), Token, Tracer),\n)\ndef sendrecv(\n sendbuf,\n recvbuf,\n source,\n dest,\n *,\n sendtag=0,\n recvtag=_MPI.ANY_TAG,\n comm=None,\n status=None,\n token=None,\n):\n \"\"\"Perform a sendrecv operation.\n\n .. warning::\n\n Unlike mpi4py's sendrecv, this returns a *new* array with the received data.\n\n Arguments:\n sendbuf: Array or scalar input to send.\n recvbuf: Array or scalar input with the correct shape and dtype. This can\n contain arbitrary data and will not be overwritten.\n source (int): Rank of the source MPI process.\n dest (int): Rank of the destination MPI process.\n sendtag (int): Tag of this message for sending.\n recvtag (int): Tag of this message for receiving.\n comm (mpi4py.MPI.Comm): The MPI communicator to use (defaults to\n a clone of :obj:`COMM_WORLD`).\n status (mpi4py.MPI.Status): Status object, can be used for introspection.\n token (Token): XLA token to use to ensure correct execution order.\n If not given, a new token is generated.\n\n Returns:\n Tuple[DeviceArray, Token]:\n - Received data.\n - A new, modified token, that depends on this operation.\n\n \"\"\"\n if token is None:\n token = create_token(sendbuf)\n\n if comm is None:\n comm = get_default_comm()\n\n comm = wrap_as_hashable(comm)\n\n if status is not None:\n status = wrap_as_hashable(status)\n\n return tuple(\n mpi_sendrecv_p.bind(\n sendbuf,\n recvbuf,\n token,\n source=source,\n dest=dest,\n sendtag=sendtag,\n recvtag=recvtag,\n comm=comm,\n status=status,\n _must_transpose=False,\n )\n )\n\n\n# This function compiles the operation\n@translation_rule_cpu\ndef mpi_sendrecv_xla_encode_cpu(\n c,\n sendbuf,\n recvbuf,\n token,\n source,\n dest,\n sendtag,\n recvtag,\n comm,\n status,\n _must_transpose=False,\n):\n from ..xla_bridge.mpi_xla_bridge import MPI_STATUS_IGNORE_ADDR\n\n # when performing forward diff, the gradient will follow the sent message.\n # so if you do a sendrecv from rank 0 to 1, the gradient wrt the inputs of rank 0\n # will end up in rank 1.\\\n # it's maybe possible to fix this by, at the end of the calculation, bringing back\n # the gradient to the correct rank, but that would require some study.\n if _must_transpose:\n raise RuntimeError(\n \"sendrecv cannot be used with forward-mode (vjp) autodiff, because \"\n \"the gradient might be located on a different mpi rank than the \"\n \"desired one. 
Use reverse-mode (jvp) differentiation instead.\"\n )\n\n comm = unpack_hashable(comm)\n status = unpack_hashable(status)\n\n recv_shape = c.GetShape(recvbuf)\n recv_dtype = recv_shape.element_type()\n recv_dims = recv_shape.dimensions()\n\n # compute total number of elements in array\n recv_nitems = _np.prod(recv_dims, dtype=int)\n recv_dtype_handle = to_dtype_handle(recv_dtype)\n\n send_shape = c.GetShape(sendbuf)\n send_dtype = send_shape.element_type()\n send_dims = send_shape.dimensions()\n\n # compute total number of elements in array\n send_nitems = _np.prod(send_dims, dtype=int)\n send_dtype_handle = to_dtype_handle(send_dtype)\n\n sh = xla_client.Shape.tuple_shape(\n [\n xla_client.Shape.array_shape(recv_dtype, recv_dims),\n xla_client.Shape.token_shape(),\n ]\n )\n\n if status is None:\n status_ptr = _np.uintp(MPI_STATUS_IGNORE_ADDR)\n else:\n status_ptr = to_mpi_ptr(status)\n\n operands = (\n xla_constant_intc(c, send_nitems),\n sendbuf,\n xla_constant_intc(c, dest),\n xla_constant_intc(c, sendtag),\n xla_constant_uintptr(c, send_dtype_handle),\n xla_constant_intc(c, recv_nitems),\n xla_constant_intc(c, source),\n xla_constant_intc(c, recvtag),\n xla_constant_uintptr(c, recv_dtype_handle),\n xla_constant_uintptr(c, to_mpi_handle(comm)),\n xla_constant_uintptr(c, status_ptr),\n token,\n )\n\n return xla_client.ops.CustomCall(\n c,\n b\"mpi_sendrecv\",\n operands=operands,\n shape=sh,\n has_side_effect=True,\n )\n\n\n@translation_rule_gpu\ndef mpi_sendrecv_xla_encode_gpu(\n c,\n sendbuf,\n recvbuf,\n token,\n source,\n dest,\n sendtag,\n recvtag,\n comm,\n status,\n _must_transpose=False,\n):\n\n if _must_transpose:\n raise RuntimeError(\n \"sendrecv cannot be used with forward-mode (vjp) autodiff, because \"\n \"the gradient might be located on a different mpi rank than the \"\n \"desired one. 
Use reverse-mode (jvp) differentiation instead.\"\n )\n\n from ..xla_bridge.mpi_xla_bridge import MPI_STATUS_IGNORE_ADDR\n from ..xla_bridge.mpi_xla_bridge_gpu import build_sendrecv_descriptor\n\n comm = unpack_hashable(comm)\n status = unpack_hashable(status)\n\n recv_shape = c.GetShape(recvbuf)\n recv_dtype = recv_shape.element_type()\n recv_dims = recv_shape.dimensions()\n\n # compute total number of elements in recv array\n recv_nitems = _np.prod(recv_dims, dtype=int)\n recv_dtype_handle = to_dtype_handle(recv_dtype)\n\n send_shape = c.GetShape(sendbuf)\n send_dtype = send_shape.element_type()\n send_dims = send_shape.dimensions()\n\n # compute total number of elements in send array\n send_nitems = _np.prod(send_dims, dtype=int)\n send_dtype_handle = to_dtype_handle(send_dtype)\n\n sh = xla_client.Shape.tuple_shape(\n [\n xla_client.Shape.array_shape(recv_dtype, recv_dims),\n xla_client.Shape.token_shape(),\n ]\n )\n\n if status is None:\n status_ptr = _np.uintp(MPI_STATUS_IGNORE_ADDR)\n else:\n status_ptr = to_mpi_ptr(status)\n\n descriptor = build_sendrecv_descriptor(\n send_nitems,\n dest,\n sendtag,\n send_dtype_handle,\n recv_nitems,\n source,\n recvtag,\n recv_dtype_handle,\n to_mpi_handle(comm),\n status_ptr,\n )\n\n return xla_client.ops.CustomCall(\n c,\n b\"mpi_sendrecv\",\n operands=(\n sendbuf,\n token,\n ),\n shape=sh,\n opaque=descriptor,\n has_side_effect=True,\n )\n\n\n# This function evaluates only the shapes during AST construction\ndef mpi_sendrecv_abstract_eval(\n sendbuf,\n recvbuf,\n token,\n source,\n dest,\n sendtag,\n recvtag,\n comm,\n status,\n _must_transpose=False,\n):\n return (\n abstract_arrays.ShapedArray(recvbuf.shape, recvbuf.dtype),\n core.abstract_token,\n )\n\n\ndef mpi_sendrecv_batch_eval(\n in_args,\n batch_axes,\n source,\n dest,\n sendtag,\n recvtag,\n comm,\n status,\n _must_transpose=False,\n):\n\n sendbuf, recvbuf, token = in_args\n\n assert batch_axes[0] == batch_axes[1]\n\n res = mpi_sendrecv_p.bind(\n sendbuf,\n recvbuf,\n token,\n source=source,\n dest=dest,\n sendtag=sendtag,\n recvtag=recvtag,\n comm=comm,\n status=status,\n _must_transpose=_must_transpose,\n )\n return res, (batch_axes[0], batch_axes[2])\n\n\ndef mpi_sendrecv_value_and_jvp(\n in_args,\n tan_args,\n source,\n dest,\n sendtag,\n recvtag,\n comm,\n status,\n _must_transpose=False,\n):\n sendbuf, recvbuf, token = in_args\n send_tan, recv_tan, token_tan = tan_args\n\n val, token = mpi_sendrecv_p.bind(\n sendbuf,\n recvbuf,\n token,\n source=source,\n dest=dest,\n sendtag=sendtag,\n recvtag=recvtag,\n comm=comm,\n status=status,\n _must_transpose=_must_transpose,\n )\n\n # throw away return token to work around jax#6285\n jvp, token_jvp = mpi_sendrecv_p.bind(\n send_tan,\n recv_tan,\n token,\n source=source,\n dest=dest,\n sendtag=sendtag,\n recvtag=recvtag,\n comm=comm,\n status=status,\n _must_transpose=not _must_transpose,\n )\n\n return (val, token), (jvp, ad.Zero.from_value(token_jvp))\n\n\ndef mpi_sendrecv_transpose_rule(\n tan_args, *x_args, source, dest, sendtag, recvtag, comm, status, _must_transpose\n):\n _, _, token = x_args\n out_tan, token_tan = tan_args\n\n # swap the sender and receiver\n res, token = mpi_sendrecv_p.bind(\n out_tan,\n out_tan,\n token,\n source=dest,\n dest=source,\n sendtag=sendtag,\n recvtag=recvtag,\n comm=comm,\n status=status,\n _must_transpose=not _must_transpose,\n )\n return res, ad.Zero.from_value(res), token_tan\n\n\nmpi_sendrecv_p.multiple_results = 
True\nmpi_sendrecv_p.def_impl(mpi_sendrecv_impl)\nmpi_sendrecv_p.def_abstract_eval(mpi_sendrecv_abstract_eval)\n\nbatching.primitive_batchers[mpi_sendrecv_p] = mpi_sendrecv_batch_eval\n\nad.primitive_jvps[mpi_sendrecv_p] = mpi_sendrecv_value_and_jvp\nad.primitive_transposes[mpi_sendrecv_p] = mpi_sendrecv_transpose_rule\n\n# assign to the primitive the correct encoder\nxla.backend_specific_translations[\"cpu\"][mpi_sendrecv_p] = mpi_sendrecv_xla_encode_cpu\nxla.backend_specific_translations[\"gpu\"][mpi_sendrecv_p] = mpi_sendrecv_xla_encode_gpu\n"
] |
[
[
"numpy.uintp",
"numpy.prod"
]
] |
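For orientation, the primitive above is normally reached through the package-level sendrecv wrapper, and per its own docstring it returns a new array plus a token rather than writing into recvbuf (recvbuf only supplies the shape and dtype). A minimal two-rank sketch, assuming mpi4jax re-exports sendrecv at the top level and the script is launched with something like mpirun -n 2 python demo.py:

import jax.numpy as jnp
from mpi4py import MPI
from mpi4jax import sendrecv  # assumption: public re-export of the primitive above

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
other = 1 - rank  # partner rank in a 2-process job

sendbuf = jnp.full((4,), float(rank))
recvbuf = jnp.zeros((4,))  # shape/dtype template only; its contents are never read
received, token = sendrecv(sendbuf, recvbuf, source=other, dest=other, comm=comm)
print(rank, received)  # each rank prints the other rank's array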
rajesh1226/fastestimator
|
[
"0765c7478c0889cf4e2841d51a35c9a06a406472"
] |
[
"fastestimator/xai/gradcam.py"
] |
[
"# Copyright 2019 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport math\nimport os\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tf_explain.core.grad_cam import GradCAM\nfrom tf_explain.utils.display import heatmap_display\n\nfrom fastestimator.xai.util import show_image, show_text\nfrom fastestimator.util.util import decode_predictions\n\n\nclass FEGradCAM(GradCAM):\n def explain(self, model_input, model, layer_name, class_index, colormap=cv2.COLORMAP_INFERNO):\n \"\"\"\n Compute GradCAM for a specific class index.\n\n Args:\n model_input (tf.tensor): Data to perform the evaluation on.\n model (tf.keras.Model): tf.keras model to inspect\n layer_name (str): Targeted layer for GradCAM\n class_index (int, None): Index of targeted class\n colormap (int): Used in parent method signature, but ignored here\n\n Returns:\n tf.cams: The gradcams\n \"\"\"\n outputs, guided_grads, predictions = FEGradCAM.get_gradients_and_filters(model, model_input, layer_name,\n class_index)\n cams = GradCAM.generate_ponderated_output(outputs, guided_grads)\n\n input_min = tf.reduce_min(model_input)\n input_max = tf.reduce_max(model_input)\n\n # Need to move input image into the 0-255 range\n adjust_sum = 0.0\n adjust_factor = 1.0\n if input_min < 0:\n adjust_sum = 1.0\n adjust_factor /= 2.0\n if input_max <= 1:\n adjust_factor *= 255.0\n\n heatmaps = [\n heatmap_display(cam.numpy(), (inp.numpy() + adjust_sum) * adjust_factor, colormap) for cam,\n inp in zip(cams, model_input)\n ]\n\n return heatmaps, predictions\n\n @staticmethod\n @tf.function\n def get_gradients_and_filters(model, images, layer_name, class_index):\n \"\"\"\n Generate guided gradients and convolutional outputs with an inference.\n\n Args:\n model (tf.keras.Model): tf.keras model to inspect\n images (tf.tensor): 4D-Tensor with shape (batch_size, H, W, 3)\n layer_name (str): Targeted layer for GradCAM\n class_index (int, None): Index of targeted class. 
If None will explain the class the network predicted\n\n Returns:\n Tuple[tf.Tensor, tf.Tensor]: (Target layer outputs, Guided gradients)\n \"\"\"\n grad_model = tf.keras.models.Model([model.inputs], [model.get_layer(layer_name).output, model.output])\n\n with tf.GradientTape() as tape:\n conv_outputs, predictions = grad_model(images)\n if class_index is not None:\n loss = predictions[:, class_index]\n else:\n class_indices = tf.reshape(tf.argmax(predictions, 1, output_type='int64'), (images.shape[0], 1))\n row_indices = tf.reshape(tf.range(class_indices.shape[0], dtype='int64'), (class_indices.shape[0], 1))\n classes = tf.concat([row_indices, class_indices], 1)\n loss = tf.gather_nd(predictions, classes)\n\n grads = tape.gradient(loss, conv_outputs)\n\n guided_grads = (tf.cast(conv_outputs > 0, \"float32\") * tf.cast(grads > 0, \"float32\") * grads)\n\n return conv_outputs, guided_grads, predictions\n\n\ndef plot_gradcam(inputs, model, layer_id=None, target_class=None, decode_dictionary=None,\n colormap=cv2.COLORMAP_INFERNO):\n \"\"\"Creates a GradCam interpretation of the given input\n\n Args:\n inputs (tf.tensor): Model input, with batch along the zero axis\n model (tf.keras.model): tf.keras model to inspect\n layer_id (int, str, None): Which layer to inspect. Should be a convolutional layer. If None, the last \\\n acceptable layer from the model will be selected\n target_class (int, None): Which output class to try to explain. None will default to explaining the maximum \\\n likelihood prediction\n decode_dictionary (dict): A dictionary of \"class_idx\" -> \"class_name\" associations\n colormap (int): Which colormap to use when generating the heatmaps\n\n Returns:\n The matplotlib figure handle\n \"\"\"\n gradcam = FEGradCAM()\n if isinstance(layer_id, int):\n layer_id = model.layers[layer_id].name\n if layer_id is None:\n for layer in reversed(model.layers):\n if layer.output.shape.ndims == 4:\n layer_id = layer.name\n break\n\n heatmaps, predictions = gradcam.explain(model_input=inputs,\n model=model,\n layer_name=layer_id,\n class_index=target_class,\n colormap=colormap)\n\n decoded = decode_predictions(np.asarray(predictions), top=3, dictionary=decode_dictionary)\n\n num_rows = math.ceil(inputs.shape[0] / 2.0)\n num_cols = 6\n dpi = 96.0\n\n box_width = max(220, inputs.shape[2])\n box_height = max(220, inputs.shape[1])\n fig, axs = plt.subplots(num_rows, num_cols, figsize=(num_cols * (box_width / dpi), num_rows * (box_height / dpi)),\n dpi=dpi)\n if num_rows == 1:\n axs = [axs] # axs object not wrapped if there's only one row\n\n odd_cols = inputs.shape[0] % 2 == 1\n if odd_cols:\n axs[num_rows - 1][3].axis('off')\n axs[num_rows - 1][4].axis('off')\n axs[num_rows - 1][5].axis('off')\n\n for row in range(num_rows):\n for idx, cols in enumerate(((0, 1, 2), (3, 4, 5))):\n if row == num_rows - 1 and idx == 1 and odd_cols:\n break\n show_text(np.ones_like(inputs[2 * row + idx]),\n decoded[2 * row + idx],\n axis=axs[row][cols[0]],\n title=\"Predictions\" if row == 0 else None)\n show_image(inputs[2 * row + idx], axis=axs[row][cols[1]], title=\"Raw\" if row == 0 else None)\n show_image(heatmaps[2 * row + idx], axis=axs[row][cols[2]], title=\"GradCam\" if row == 0 else None)\n\n plt.subplots_adjust(top=0.95, bottom=0.01, left=0.01, right=0.99, hspace=0.03, wspace=0.03)\n\n return fig\n\n\ndef visualize_gradcam(inputs,\n model,\n layer_id=None,\n target_class=None,\n decode_dictionary=None,\n colormap=cv2.COLORMAP_INFERNO,\n save_path='.'):\n \"\"\"Displays or saves a GradCam interpretation 
of the given input\n\n Args:\n inputs (tf.tensor): Model input, with batch along the zero axis\n model (tf.keras.model): tf.keras model to inspect\n layer_id (int, str, None): Which layer to inspect. Should be a convolutional layer. If None, the last \\\n acceptable layer from the model will be selected\n target_class (int, None): Which output class to try to explain. None will default to explaining the maximum \\\n likelihood prediction\n decode_dictionary (dict): A dictionary of \"class_idx\" -> \"class_name\" associations\n colormap (int): Which colormap to use when generating the heatmaps\n save_path (str, None): Where to save the image. If None then the image will be displayed instead\n \"\"\"\n plot_gradcam(inputs=inputs,\n model=model,\n layer_id=layer_id,\n target_class=target_class,\n decode_dictionary=decode_dictionary,\n colormap=colormap)\n if save_path is None:\n plt.show()\n else:\n save_path = os.path.dirname(save_path)\n if save_path == \"\":\n save_path = \".\"\n os.makedirs(save_path, exist_ok=True)\n save_file = os.path.join(save_path, 'gradCam.png')\n print(\"Saving to {}\".format(save_file))\n plt.savefig(save_file, dpi=300, bbox_inches=\"tight\")\n"
] |
[
[
"tensorflow.reduce_max",
"tensorflow.concat",
"tensorflow.gather_nd",
"numpy.ones_like",
"tensorflow.range",
"numpy.asarray",
"tensorflow.cast",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"tensorflow.reduce_min",
"matplotlib.pyplot.subplots_adjust",
"tensorflow.argmax",
"matplotlib.pyplot.show",
"tensorflow.GradientTape"
]
] |
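When class_index is None, get_gradients_and_filters() builds the loss from each sample's own top prediction: argmax gives the column index per row, a row-index column is concatenated alongside, and tf.gather_nd picks one logit per (row, column) pair. A minimal sketch of just that indexing step on a hand-written batch of two:

import tensorflow as tf

predictions = tf.constant([[0.1, 0.7, 0.2],
                           [0.5, 0.3, 0.2]])

class_indices = tf.reshape(tf.argmax(predictions, axis=1, output_type=tf.int64), (-1, 1))
row_indices = tf.reshape(tf.range(2, dtype=tf.int64), (-1, 1))  # 2 = batch size here
pairs = tf.concat([row_indices, class_indices], axis=1)  # [[0, 1], [1, 0]]
loss = tf.gather_nd(predictions, pairs)
print(loss.numpy())  # [0.7 0.5]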
robinzixuan/chatbot
|
[
"f71eb2a0972563e81fd32d90bb764e91432dad27"
] |
[
"nlp_chatbot/metrics.py"
] |
[
"from . import PAD_INDEX\n\nimport numpy as np\n\n\ndef mlm_accuracy(predictions, targets):\n mlm_predictions, nsp_predictions = predictions\n mlm_targets, is_nexts = targets\n\n relevent_indexes = np.where(mlm_targets != PAD_INDEX)\n relevent_predictions = mlm_predictions[relevent_indexes]\n relevent_targets = mlm_targets[relevent_indexes]\n\n corrects = np.equal(relevent_predictions, relevent_targets)\n return corrects.mean()\n\n\ndef nsp_accuracy(predictions, targets):\n mlm_predictions, nsp_predictions = predictions\n mlm_targets, is_nexts = targets\n\n corrects = np.equal(nsp_predictions, is_nexts)\n return corrects.mean()\n\n\ndef classification_accuracy(predictions, targets):\n corrects = np.equal(predictions, targets)\n return corrects.mean()\n"
] |
[
[
"numpy.where",
"numpy.equal"
]
] |
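mlm_accuracy() above scores only non-padding positions: numpy.where selects the indexes whose target differs from PAD_INDEX, and the element-wise comparison runs on that subset alone. A minimal sketch, with PAD_INDEX assumed to be 0 (the real constant is imported from the package's __init__):

import numpy as np

PAD_INDEX = 0  # assumption for this sketch

predictions = np.array([5, 2, 9, 1])
targets = np.array([5, 0, 9, 3])  # the 0 marks a padded position

keep = np.where(targets != PAD_INDEX)
accuracy = np.equal(predictions[keep], targets[keep]).mean()
print(accuracy)  # 2 correct out of 3 non-pad positions -> 0.666...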
delta2323/chainerchem
|
[
"364dd2b26aec2d0b25d5e2b30a9510a9d44814af"
] |
[
"chainer_chemistry/dataset/parsers/sdf_file_parser.py"
] |
[
"from logging import getLogger\nimport numpy\nfrom rdkit import Chem\nfrom tqdm import tqdm\n\nfrom chainer_chemistry.dataset.parsers.base_parser import BaseFileParser\nfrom chainer_chemistry.dataset.preprocessors.common import MolFeatureExtractionError # NOQA\nfrom chainer_chemistry.dataset.preprocessors.mol_preprocessor import MolPreprocessor # NOQA\nfrom chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n\n\nclass SDFFileParser(BaseFileParser):\n \"\"\"sdf file parser\n\n Args:\n filepath:\n preprocessor:\n labels (str or list): labels column\n postprocess_label (Callable): post processing function if necessary\n postprocess_fn (Callable): post processing function if necessary\n \"\"\"\n\n def __init__(self, preprocessor, labels=None, postprocess_label=None,\n postprocess_fn=None):\n super(SDFFileParser, self).__init__(preprocessor)\n self.labels = labels\n self.postprocess_label = postprocess_label\n self.postprocess_fn = postprocess_fn\n self.smiles = None\n\n def parse(self, filepath, retain_smiles=False):\n \"\"\"parse sdf file using `preprocessor`\n\n Note that label is extracted from preprocessor's method.\n\n Args:\n filepath (str): file path to be parsed.\n retain_smiles (bool): If set to True, smiles list is saved to\n `smiles` property.\n\n Returns: Dataset\n\n \"\"\"\n logger = getLogger(__name__)\n pp = self.preprocessor\n if retain_smiles:\n self.smiles = [] # Initialize\n\n if isinstance(pp, MolPreprocessor):\n mol_supplier = Chem.SDMolSupplier(filepath)\n\n features = None\n\n total_count = len(mol_supplier)\n fail_count = 0\n success_count = 0\n for mol in tqdm(mol_supplier):\n if mol is None:\n total_count -= 1\n continue\n try:\n # Labels need to be extracted from `mol` before standardize\n # smiles.\n if self.labels is not None:\n label = pp.get_label(mol, self.labels)\n if self.postprocess_label is not None:\n label = self.postprocess_label(label)\n\n # Note that smiles expression is not unique.\n # we should re-obtain smiles from `mol`, so that the\n # smiles order does not contradict with input features'\n # order.\n # Here, `smiles` and `standardized_smiles` expresses\n # same molecule, but the expression may be different!\n smiles = Chem.MolToSmiles(mol)\n mol = Chem.MolFromSmiles(smiles)\n standardized_smiles, mol = pp.prepare_smiles_and_mol(mol)\n input_features = pp.get_input_features(mol)\n\n # Initialize features: list of list\n if features is None:\n if isinstance(input_features, tuple):\n num_features = len(input_features)\n else:\n num_features = 1\n if self.labels is not None:\n num_features += 1\n features = [[] for _ in range(num_features)]\n\n if retain_smiles:\n assert standardized_smiles == Chem.MolToSmiles(mol)\n self.smiles.append(standardized_smiles)\n except MolFeatureExtractionError as e:\n # This is expected error that extracting feature failed,\n # skip this molecule.\n fail_count += 1\n continue\n except Exception as e:\n logger = getLogger(__name__)\n logger.warning('parse() error, type: {}, {}'\n .format(type(e).__name__, e.args))\n continue\n\n if isinstance(input_features, tuple):\n for i in range(len(input_features)):\n features[i].append(input_features[i])\n else:\n features[0].append(input_features)\n if self.labels is not None:\n features[len(features) - 1].append(label)\n success_count += 1\n\n ret = []\n\n for feature in features:\n try:\n feat_array = numpy.asarray(feature)\n except ValueError:\n # Temporal work around to convert object-type list into\n # numpy array.\n # See, https://goo.gl/kgJXwb\n 
feat_array = numpy.empty(len(feature), dtype=numpy.ndarray)\n feat_array[:] = feature[:]\n ret.append(feat_array)\n result = tuple(ret)\n logger.info('Preprocess finished. FAIL {}, SUCCESS {}, TOTAL {}'\n .format(fail_count, success_count, total_count))\n else:\n # Spec not finalized yet for general case\n result = pp.process(filepath)\n\n if isinstance(result, tuple):\n if self.postprocess_fn is not None:\n result = self.postprocess_fn(*result)\n return NumpyTupleDataset(*result)\n else:\n if self.postprocess_fn is not None:\n result = self.postprocess_fn(result)\n return NumpyTupleDataset(result)\n"
] |
[
[
"numpy.asarray"
]
] |
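The ValueError fallback in SDFFileParser.parse() deals with ragged features: on recent NumPy, numpy.asarray refuses to build a rectangular array from differently shaped members, so the parser pre-allocates an object array and fills it slot by slot. A minimal sketch with synthetic arrays:

import numpy

feature = [numpy.zeros((2, 3)), numpy.zeros((4, 3))]  # differently shaped entries

try:
    feat_array = numpy.asarray(feature)
except ValueError:
    # same workaround as parse(): an object-dtype array holding the pieces
    feat_array = numpy.empty(len(feature), dtype=numpy.ndarray)
    feat_array[:] = feature[:]

print(feat_array.shape, feat_array[0].shape, feat_array[1].shape)  # (2,) (2, 3) (4, 3)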
SriSatyaT/jina
|
[
"c687fac6dbe4c9c6bd02dcdcdfaee28458222cd7"
] |
[
"jina/types/document/__init__.py"
] |
[
"import base64\nimport io\nimport json\nimport mimetypes\nimport os\nimport urllib.parse\nimport urllib.request\nimport warnings\nfrom hashlib import blake2b\nfrom typing import (\n Iterable,\n Union,\n Dict,\n Optional,\n TypeVar,\n Any,\n Tuple,\n List,\n Type,\n)\n\nimport numpy as np\nfrom google.protobuf import json_format\nfrom google.protobuf.field_mask_pb2 import FieldMask\n\nfrom .converters import png_to_buffer, to_datauri, to_image_blob\nfrom ..mixin import ProtoTypeMixin\nfrom ..ndarray.generic import NdArray, BaseSparseNdArray\nfrom ..score import NamedScore\nfrom ..score.map import NamedScoreMapping\nfrom ..struct import StructView\nfrom ...excepts import BadDocType\nfrom ...helper import (\n typename,\n random_identity,\n download_mermaid_url,\n dunder_get,\n)\nfrom ...importer import ImportExtensions\nfrom ...logging.predefined import default_logger\nfrom ...proto import jina_pb2\n\nif False:\n from ..arrays.chunk import ChunkArray\n from ..arrays.match import MatchArray\n\n from scipy.sparse import coo_matrix\n\n # fix type-hint complain for sphinx and flake\n import scipy\n import tensorflow as tf\n import torch\n\n ArrayType = TypeVar(\n 'ArrayType',\n np.ndarray,\n scipy.sparse.csr_matrix,\n scipy.sparse.coo_matrix,\n scipy.sparse.bsr_matrix,\n scipy.sparse.csc_matrix,\n torch.sparse_coo_tensor,\n tf.SparseTensor,\n )\n\n SparseArrayType = TypeVar(\n 'SparseArrayType',\n np.ndarray,\n scipy.sparse.csr_matrix,\n scipy.sparse.coo_matrix,\n scipy.sparse.bsr_matrix,\n scipy.sparse.csc_matrix,\n torch.sparse_coo_tensor,\n tf.SparseTensor,\n )\n\n__all__ = ['Document', 'DocumentContentType', 'DocumentSourceType']\nDIGEST_SIZE = 8\n\n# This list is not exhaustive because we cannot add the `sparse` types without adding the `dependencies`\nDocumentContentType = TypeVar('DocumentContentType', bytes, str, 'ArrayType')\nDocumentSourceType = TypeVar(\n 'DocumentSourceType', jina_pb2.DocumentProto, bytes, str, Dict, 'Document'\n)\n\n_all_mime_types = set(mimetypes.types_map.values())\n\n_all_doc_content_keys = ('content', 'uri', 'blob', 'text', 'buffer')\n_all_doc_array_keys = ('blob', 'embedding')\n_special_mapped_keys = ('scores', 'evaluations')\n\n\nclass Document(ProtoTypeMixin):\n \"\"\"\n :class:`Document` is one of the **primitive data type** in Jina.\n\n It offers a Pythonic interface to allow users access and manipulate\n :class:`jina.jina_pb2.DocumentProto` object without working with Protobuf itself.\n\n To create a :class:`Document` object, simply:\n\n .. highlight:: python\n .. code-block:: python\n\n from jina import Document\n d = Document()\n d.text = 'abc'\n\n Jina requires each Document to have a string id. You can set a custom one,\n or if non has been set a random one will be assigned.\n\n Or you can use :class:`Document` as a context manager:\n\n .. highlight:: python\n .. code-block:: python\n\n with Document() as d:\n d.text = 'hello'\n\n assert d.id # now `id` has value\n\n To access and modify the content of the document, you can use :attr:`text`, :attr:`blob`, and :attr:`buffer`.\n Each property is implemented with proper setter, to improve the integrity and user experience. For example,\n assigning ``doc.blob`` or ``doc.embedding`` can be simply done via:\n\n .. highlight:: python\n .. 
code-block:: python\n\n import numpy as np\n\n # to set as content\n d.content = np.random.random([10, 5])\n\n # to set as embedding\n d.embedding = np.random.random([10, 5])\n\n MIME type is auto set/guessed when setting :attr:`content` and :attr:`uri`\n\n :class:`Document` also provides multiple way to build from existing Document. You can build :class:`Document`\n from ``jina_pb2.DocumentProto``, ``bytes``, ``str``, and ``Dict``. You can also use it as view (i.e.\n weak reference when building from an existing ``jina_pb2.DocumentProto``). For example,\n\n .. highlight:: python\n .. code-block:: python\n\n a = DocumentProto()\n b = Document(a, copy=False)\n a.text = 'hello'\n assert b.text == 'hello'\n\n You can leverage the :meth:`convert_a_to_b` interface to convert between content forms.\n\n \"\"\"\n\n def __init__(\n self,\n document: Optional[DocumentSourceType] = None,\n field_resolver: Dict[str, str] = None,\n copy: bool = False,\n hash_content: bool = True,\n **kwargs,\n ):\n \"\"\"\n :param document: the document to construct from. If ``bytes`` is given\n then deserialize a :class:`DocumentProto`; ``dict`` is given then\n parse a :class:`DocumentProto` from it; ``str`` is given, then consider\n it as a JSON string and parse a :class:`DocumentProto` from it; finally,\n one can also give `DocumentProto` directly, then depending on the ``copy``,\n it builds a view or a copy from it.\n :param copy: when ``document`` is given as a :class:`DocumentProto` object, build a\n view (i.e. weak reference) from it or a deep copy from it.\n :param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field\n names defined in Protobuf. This is only used when the given ``document`` is\n a JSON string or a Python dict.\n :param kwargs: other parameters to be set _after_ the document is constructed\n :param hash_content: whether to hash the content of the Document\n\n .. note::\n\n When ``document`` is a JSON string or Python dictionary object, the constructor will only map the values\n from known fields defined in Protobuf, all unknown fields are mapped to ``document.tags``. For example,\n\n .. highlight:: python\n .. 
code-block:: python\n\n d = Document({'id': '123', 'hello': 'world', 'tags': {'good': 'bye'}})\n\n assert d.id == '123' # true\n assert d.tags['hello'] == 'world' # true\n assert d.tags['good'] == 'bye' # true\n \"\"\"\n self._pb_body = jina_pb2.DocumentProto()\n try:\n if isinstance(document, jina_pb2.DocumentProto):\n if copy:\n self._pb_body.CopyFrom(document)\n else:\n self._pb_body = document\n elif isinstance(document, (dict, str)):\n if isinstance(document, str):\n document = json.loads(document)\n\n def _update_doc(d: Dict):\n for key in _all_doc_array_keys:\n if key in d:\n value = d[key]\n if isinstance(value, list):\n d[key] = NdArray(np.array(d[key])).dict()\n if 'chunks' in d:\n for chunk in d['chunks']:\n _update_doc(chunk)\n if 'matches' in d:\n for match in d['matches']:\n _update_doc(match)\n\n _update_doc(document)\n\n if field_resolver:\n document = {\n field_resolver.get(k, k): v for k, v in document.items()\n }\n\n user_fields = set(document.keys())\n support_fields = set(\n self.attributes(\n include_proto_fields_camelcase=True, include_properties=False\n )\n )\n\n if support_fields.issuperset(user_fields):\n json_format.ParseDict(document, self._pb_body)\n else:\n _intersect = support_fields.intersection(user_fields)\n _remainder = user_fields.difference(_intersect)\n if _intersect:\n json_format.ParseDict(\n {k: document[k] for k in _intersect}, self._pb_body\n )\n if _remainder:\n support_prop = set(\n self.attributes(\n include_proto_fields=False, include_properties=True\n )\n )\n _intersect2 = support_prop.intersection(_remainder)\n _remainder2 = _remainder.difference(_intersect2)\n\n if _intersect2:\n self.set_attributes(**{p: document[p] for p in _intersect2})\n\n if _remainder2:\n self._pb_body.tags.update(\n {k: document[k] for k in _remainder}\n )\n elif isinstance(document, bytes):\n # directly parsing from binary string gives large false-positive\n # fortunately protobuf throws a warning when the parsing seems go wrong\n # the context manager below converts this warning into exception and throw it\n # properly\n with warnings.catch_warnings():\n warnings.filterwarnings(\n 'error', 'Unexpected end-group tag', category=RuntimeWarning\n )\n try:\n self._pb_body.ParseFromString(document)\n except RuntimeWarning as ex:\n raise BadDocType(\n f'fail to construct a document from {document}'\n ) from ex\n elif isinstance(document, Document):\n if copy:\n self._pb_body.CopyFrom(document.proto)\n else:\n self._pb_body = document.proto\n elif document is not None:\n # note ``None`` is not considered as a bad type\n raise ValueError(f'{typename(document)} is not recognizable')\n except Exception as ex:\n raise BadDocType(\n f'fail to construct a document from {document}, '\n f'if you are trying to set the content '\n f'you may use \"Document(content=your_content)\"'\n ) from ex\n\n if self._pb_body.id is None or not self._pb_body.id:\n self.id = random_identity(use_uuid1=True)\n\n # check if there are mutually exclusive content fields\n if _contains_conflicting_content(**kwargs):\n raise ValueError(\n f'Document content fields are mutually exclusive, please provide only one of {_all_doc_content_keys}'\n )\n self.set_attributes(**kwargs)\n self._mermaid_id = random_identity() #: for mermaid visualize id\n if hash_content and not copy:\n self.update_content_hash()\n\n def pop(self, *fields) -> None:\n \"\"\"Remove the values from the given fields of this Document.\n\n :param fields: field names\n \"\"\"\n for k in fields:\n self._pb_body.ClearField(k)\n\n def clear(self) 
-> None:\n \"\"\"Remove all values from all fields of this Document.\"\"\"\n self._pb_body.Clear()\n\n @property\n def weight(self) -> float:\n \"\"\"\n :return: the weight of the document\n \"\"\"\n return self._pb_body.weight\n\n @weight.setter\n def weight(self, value: float):\n \"\"\"\n Set the weight of the document.\n\n :param value: the float weight of the document.\n \"\"\"\n self._pb_body.weight = value\n\n @property\n def modality(self) -> str:\n \"\"\"\n :return: the modality of the document.\"\"\"\n return self._pb_body.modality\n\n @modality.setter\n def modality(self, value: str):\n \"\"\"Set the modality of the document.\n\n :param value: The modality of the document\n \"\"\"\n self._pb_body.modality = value\n\n @property\n def content_hash(self):\n \"\"\"Get the content hash of the document.\n\n :return: the content_hash from the proto\n \"\"\"\n return self._pb_body.content_hash\n\n @property\n def tags(self) -> StructView:\n \"\"\"Return the `tags` field of this Document as a Python dict\n\n :return: a Python dict view of the tags.\n \"\"\"\n return StructView(self._pb_body.tags)\n\n @tags.setter\n def tags(self, value: Union[Dict, StructView]):\n \"\"\"Set the `tags` field of this Document to a Python dict\n\n :param value: a Python dict or a StructView\n \"\"\"\n if isinstance(value, StructView):\n self._pb_body.tags.Clear()\n self._pb_body.tags.update(value._pb_body)\n elif isinstance(value, dict):\n self._pb_body.tags.Clear()\n self._pb_body.tags.update(value)\n else:\n raise TypeError(f'{value!r} is not supported.')\n\n def _update(\n self,\n source: 'Document',\n destination: 'Document',\n fields: Optional[List[str]] = None,\n ) -> None:\n \"\"\"Merge fields specified in ``fields`` from source to destination.\n\n :param source: source :class:`Document` object.\n :param destination: the destination :class:`Document` object to be merged into.\n :param fields: a list of field names that included from destination document\n\n .. note::\n *. if ``fields`` is empty, then destination is overridden by the source completely.\n *. ``destination`` will be modified in place, ``source`` will be unchanged.\n *. the ``fields`` has value in destination while not in source will be preserved.\n \"\"\"\n # We do a safe update: only update existent (value being set) fields from source.\n fields_can_be_updated = []\n # ListFields returns a list of (FieldDescriptor, value) tuples for present fields.\n present_fields = source._pb_body.ListFields()\n for field_descriptor, _ in present_fields:\n fields_can_be_updated.append(field_descriptor.name)\n if not fields:\n fields = fields_can_be_updated # if `fields` empty, update all fields.\n for field in fields:\n if (\n field == 'tags'\n ): # For the tags, stay consistent with the python update method.\n destination._pb_body.tags.update(source.tags)\n else:\n destination._pb_body.ClearField(field)\n try:\n setattr(destination, field, getattr(source, field))\n except AttributeError: # some fields such as `content_hash` do not have a setter method.\n setattr(destination._pb_body, field, getattr(source, field))\n\n def update(\n self,\n source: 'Document',\n fields: Optional[List[str]] = None,\n ) -> None:\n \"\"\"Updates fields specified in ``fields`` from the source to current Document.\n\n :param source: source :class:`Document` object.\n :param fields: a list of field names that included from the current document,\n if not specified, merge all fields.\n\n .. note::\n *. 
``destination`` will be modified in place, ``source`` will be unchanged\n \"\"\"\n if fields and not isinstance(fields, list):\n raise TypeError('Parameter `fields` must be list of str')\n self._update(\n source,\n self,\n fields=fields,\n )\n\n def update_content_hash(\n self,\n exclude_fields: Optional[Tuple[str]] = (\n 'id',\n 'chunks',\n 'matches',\n 'content_hash',\n 'parent_id',\n ),\n include_fields: Optional[Tuple[str]] = None,\n ) -> None:\n \"\"\"Update the document hash according to its content.\n\n :param exclude_fields: a tuple of field names that excluded when computing content hash\n :param include_fields: a tuple of field names that included when computing content hash\n\n .. note::\n \"exclude_fields\" and \"include_fields\" are mutually exclusive, use one only\n \"\"\"\n masked_d = jina_pb2.DocumentProto()\n masked_d.CopyFrom(self._pb_body)\n empty_doc = jina_pb2.DocumentProto()\n if include_fields and exclude_fields:\n raise ValueError(\n '\"exclude_fields\" and \"exclude_fields\" are mutually exclusive, use one only'\n )\n\n if include_fields is not None:\n FieldMask(paths=include_fields).MergeMessage(masked_d, empty_doc)\n masked_d = empty_doc\n elif exclude_fields is not None:\n FieldMask(paths=exclude_fields).MergeMessage(\n empty_doc, masked_d, replace_repeated_field=True\n )\n\n self._pb_body.content_hash = blake2b(\n masked_d.SerializeToString(), digest_size=DIGEST_SIZE\n ).hexdigest()\n\n @property\n def id(self) -> str:\n \"\"\"The document id in hex string, for non-binary environment such as HTTP, CLI, HTML and also human-readable.\n it will be used as the major view.\n\n :return: the id from the proto\n \"\"\"\n return self._pb_body.id\n\n @property\n def parent_id(self) -> str:\n \"\"\"The document's parent id in hex string, for non-binary environment such as HTTP, CLI, HTML and also human-readable.\n it will be used as the major view.\n\n :return: the parent id from the proto\n \"\"\"\n return self._pb_body.parent_id\n\n @id.setter\n def id(self, value: Union[bytes, str, int]):\n \"\"\"Set document id to a string value.\n\n :param value: id as bytes, int or str\n \"\"\"\n self._pb_body.id = str(value)\n\n @parent_id.setter\n def parent_id(self, value: Union[bytes, str, int]):\n \"\"\"Set document's parent id to a string value.\n\n :param value: id as bytes, int or str\n \"\"\"\n self._pb_body.parent_id = str(value)\n\n @property\n def blob(self) -> 'ArrayType':\n \"\"\"Return ``blob``, one of the content form of a Document.\n\n .. note::\n Use :attr:`content` to return the content of a Document\n\n This property will return the `blob` of the `Document` as a `Dense` or `Sparse` array depending on the actual\n proto instance stored. 
In the case where the `blob` stored is sparse, it will return them as a `coo` matrix.\n If any other type of `sparse` type is desired, use the `:meth:`get_sparse_blob`.\n\n :return: the blob content from the proto\n \"\"\"\n return NdArray(self._pb_body.blob).value\n\n def get_sparse_blob(\n self, sparse_ndarray_cls_type: Type[BaseSparseNdArray], **kwargs\n ) -> 'SparseArrayType':\n \"\"\"Return ``blob`` of the content of a Document as an sparse array.\n\n :param sparse_ndarray_cls_type: Sparse class type, such as `SparseNdArray`.\n :param kwargs: Additional key value argument, for `scipy` backend, we need to set\n the keyword `sp_format` as one of the scipy supported sparse format, such as `coo`\n or `csr`.\n :return: the blob from the proto as an sparse array\n \"\"\"\n return NdArray(\n self._pb_body.blob,\n sparse_cls=sparse_ndarray_cls_type,\n is_sparse=True,\n **kwargs,\n ).value\n\n @blob.setter\n def blob(self, value: Union['ArrayType', 'jina_pb2.NdArrayProto', 'NdArray']):\n \"\"\"Set the `blob` to :param:`value`.\n\n :param value: the array value to set the blob\n \"\"\"\n self._update_ndarray('blob', value)\n\n @property\n def embedding(self) -> 'SparseArrayType':\n \"\"\"Return ``embedding`` of the content of a Document.\n\n .. note::\n This property will return the `embedding` of the `Document` as a `Dense` or `Sparse` array depending on the actual\n proto instance stored. In the case where the `embedding` stored is sparse, it will return them as a `coo` matrix.\n If any other type of `sparse` type is desired, use the `:meth:`get_sparse_embedding`.\n\n :return: the embedding from the proto\n \"\"\"\n return NdArray(self._pb_body.embedding).value\n\n def get_sparse_embedding(\n self, sparse_ndarray_cls_type: Type[BaseSparseNdArray], **kwargs\n ) -> 'SparseArrayType':\n \"\"\"Return ``embedding`` of the content of a Document as an sparse array.\n\n :param sparse_ndarray_cls_type: Sparse class type, such as `SparseNdArray`.\n :param kwargs: Additional key value argument, for `scipy` backend, we need to set\n the keyword `sp_format` as one of the scipy supported sparse format, such as `coo`\n or `csr`.\n :return: the embedding from the proto as an sparse array\n \"\"\"\n return NdArray(\n self._pb_body.embedding,\n sparse_cls=sparse_ndarray_cls_type,\n is_sparse=True,\n **kwargs,\n ).value\n\n @embedding.setter\n def embedding(self, value: Union['ArrayType', 'jina_pb2.NdArrayProto', 'NdArray']):\n \"\"\"Set the ``embedding`` of the content of a Document.\n\n :param value: the array value to set the embedding\n \"\"\"\n self._update_ndarray('embedding', value)\n\n def _update_sparse_ndarray(self, k, v, sparse_cls):\n NdArray(\n is_sparse=True,\n sparse_cls=sparse_cls,\n proto=getattr(self._pb_body, k),\n ).value = v\n\n @staticmethod\n def _check_installed_array_packages():\n from ... import JINA_GLOBAL\n\n if JINA_GLOBAL.scipy_installed is None:\n JINA_GLOBAL.scipy_installed = False\n with ImportExtensions(required=False, pkg_name='scipy'):\n import scipy\n\n JINA_GLOBAL.scipy_installed = True\n\n if JINA_GLOBAL.tensorflow_installed is None:\n JINA_GLOBAL.tensorflow_installed = False\n with ImportExtensions(required=False, pkg_name='tensorflow'):\n import tensorflow\n\n JINA_GLOBAL.tensorflow_installed = True\n\n if JINA_GLOBAL.torch_installed is None:\n JINA_GLOBAL.torch_installed = False\n with ImportExtensions(required=False, pkg_name='torch'):\n import torch\n\n JINA_GLOBAL.torch_installed = True\n\n def _update_if_sparse(self, k, v):\n\n from ... 
import JINA_GLOBAL\n\n v_valid_sparse_type = False\n Document._check_installed_array_packages()\n\n if JINA_GLOBAL.scipy_installed:\n import scipy\n\n if scipy.sparse.issparse(v):\n from ..ndarray.sparse.scipy import SparseNdArray\n\n self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)\n v_valid_sparse_type = True\n\n if JINA_GLOBAL.tensorflow_installed:\n import tensorflow\n\n if isinstance(v, tensorflow.SparseTensor):\n from ..ndarray.sparse.tensorflow import SparseNdArray\n\n self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)\n v_valid_sparse_type = True\n\n if JINA_GLOBAL.torch_installed:\n import torch\n\n if isinstance(v, torch.Tensor) and v.is_sparse:\n from ..ndarray.sparse.pytorch import SparseNdArray\n\n self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)\n v_valid_sparse_type = True\n\n return v_valid_sparse_type\n\n def _update_ndarray(self, k, v):\n if isinstance(v, jina_pb2.NdArrayProto):\n getattr(self._pb_body, k).CopyFrom(v)\n elif isinstance(v, np.ndarray):\n NdArray(getattr(self._pb_body, k)).value = v\n elif isinstance(v, NdArray):\n NdArray(getattr(self._pb_body, k)).is_sparse = v.is_sparse\n NdArray(getattr(self._pb_body, k)).value = v.value\n else:\n v_valid_sparse_type = self._update_if_sparse(k, v)\n\n if not v_valid_sparse_type:\n raise TypeError(f'{k} is in unsupported type {typename(v)}')\n\n @property\n def matches(self) -> 'MatchArray':\n \"\"\"Get all matches of the current document.\n\n :return: the array of matches attached to this document\n \"\"\"\n # Problem with cyclic dependency\n from ..arrays.match import MatchArray\n\n return MatchArray(self._pb_body.matches, reference_doc=self)\n\n @matches.setter\n def matches(self, value: Iterable['Document']):\n \"\"\"Get all chunks of the current document.\n\n :param value: value to set\n \"\"\"\n self.pop('matches')\n self.matches.extend(value)\n\n @property\n def chunks(self) -> 'ChunkArray':\n \"\"\"Get all chunks of the current document.\n\n :return: the array of chunks of this document\n \"\"\"\n # Problem with cyclic dependency\n from ..arrays.chunk import ChunkArray\n\n return ChunkArray(self._pb_body.chunks, reference_doc=self)\n\n @chunks.setter\n def chunks(self, value: Iterable['Document']):\n \"\"\"Get all chunks of the current document.\n\n :param value: the array of chunks of this document\n \"\"\"\n self.pop('chunks')\n self.chunks.extend(value)\n\n def set_attributes(self, **kwargs):\n \"\"\"Bulk update Document fields with key-value specified in kwargs\n\n .. 
seealso::\n :meth:`get_attrs` for bulk get attributes\n\n :param kwargs: the keyword arguments to set the values, where the keys are the fields to set\n \"\"\"\n for k, v in kwargs.items():\n if isinstance(v, (list, tuple)):\n if k == 'chunks':\n self.chunks.extend(v)\n elif k == 'matches':\n self.matches.extend(v)\n else:\n self._pb_body.ClearField(k)\n getattr(self._pb_body, k).extend(v)\n elif isinstance(v, dict) and k not in _special_mapped_keys:\n self._pb_body.ClearField(k)\n getattr(self._pb_body, k).update(v)\n else:\n if (\n hasattr(Document, k)\n and isinstance(getattr(Document, k), property)\n and getattr(Document, k).fset\n ):\n # if class property has a setter\n setattr(self, k, v)\n elif hasattr(self._pb_body, k):\n # no property setter, but proto has this attribute so fallback to proto\n setattr(self._pb_body, k, v)\n else:\n raise AttributeError(f'{k} is not recognized')\n\n def get_attributes(self, *fields: str) -> Union[Any, List[Any]]:\n \"\"\"Bulk fetch Document fields and return a list of the values of these fields\n\n .. note::\n Arguments will be extracted using `dunder_get`\n .. highlight:: python\n .. code-block:: python\n\n d = Document({'id': '123', 'hello': 'world', 'tags': {'id': 'external_id', 'good': 'bye'}})\n\n assert d.id == '123' # true\n assert d.tags['hello'] == 'world' # true\n assert d.tags['good'] == 'bye' # true\n assert d.tags['id'] == 'external_id' # true\n\n res = d.get_attrs_values(*['id', 'tags__hello', 'tags__good', 'tags__id'])\n\n assert res == ['123', 'world', 'bye', 'external_id']\n\n :param fields: the variable length values to extract from the document\n :return: a list with the attributes of this document ordered as the args\n \"\"\"\n\n ret = []\n for k in fields:\n try:\n value = getattr(self, k)\n\n if value is None:\n raise ValueError\n\n ret.append(value)\n except (AttributeError, ValueError):\n default_logger.warning(\n f'Could not get attribute `{typename(self)}.{k}`, returning `None`'\n )\n ret.append(None)\n\n # unboxing if args is single\n if len(fields) == 1:\n ret = ret[0]\n\n return ret\n\n @property\n def buffer(self) -> bytes:\n \"\"\"Return ``buffer``, one of the content form of a Document.\n\n .. note::\n Use :attr:`content` to return the content of a Document\n\n :return: the buffer bytes from this document\n \"\"\"\n return self._pb_body.buffer\n\n @buffer.setter\n def buffer(self, value: bytes):\n \"\"\"Set the ``buffer`` to :param:`value`.\n\n :param value: the bytes value to set the buffer\n \"\"\"\n self._pb_body.buffer = value\n\n @property\n def text(self):\n \"\"\"Return ``text``, one of the content form of a Document.\n\n .. note::\n Use :attr:`content` to return the content of a Document\n\n :return: the text from this document content\n \"\"\"\n return self._pb_body.text\n\n @text.setter\n def text(self, value: str):\n \"\"\"Set the `text` to :param:`value`\n\n :param value: the text value to set as content\n \"\"\"\n self._pb_body.text = value\n self.mime_type = 'text/plain'\n\n @property\n def uri(self) -> str:\n \"\"\"Return the URI of the document.\n\n :return: the uri from this document proto\n \"\"\"\n return self._pb_body.uri\n\n @uri.setter\n def uri(self, value: str):\n \"\"\"Set the URI of the document.\n\n .. 
note::\n :attr:`mime_type` will be updated accordingly\n\n :param value: acceptable URI/URL, raise ``ValueError`` when it is not a valid URI\n \"\"\"\n self._pb_body.uri = value\n mime_type = mimetypes.guess_type(value)[0]\n if mime_type:\n self.mime_type = mime_type # Remote http/https contents mime_type will not be recognized.\n\n @property\n def mime_type(self) -> str:\n \"\"\"Get MIME type of the document\n\n :return: the mime_type from this document proto\n \"\"\"\n return self._pb_body.mime_type\n\n @mime_type.setter\n def mime_type(self, value: str):\n \"\"\"Set MIME type of the document\n\n :param value: the acceptable MIME type, raise ``ValueError`` when MIME type is not\n recognizable.\n \"\"\"\n if value in _all_mime_types:\n self._pb_body.mime_type = value\n elif value:\n # given but not recognizable, do best guess\n r = mimetypes.guess_type(f'*.{value}')[0]\n if r:\n self._pb_body.mime_type = r\n else:\n self._pb_body.mime_type = value\n\n def __enter__(self):\n return self\n\n def __eq__(self, other):\n return self.proto == other.proto\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.update_content_hash()\n\n @property\n def content_type(self) -> str:\n \"\"\"Return the content type of the document, possible values: text, blob, buffer\n\n :return: the type of content present in this document proto\n \"\"\"\n return self._pb_body.WhichOneof('content')\n\n @property\n def content(self) -> DocumentContentType:\n \"\"\"Return the content of the document. It checks whichever field among :attr:`blob`, :attr:`text`,\n :attr:`buffer` has value and return it.\n\n .. seealso::\n :attr:`blob`, :attr:`buffer`, :attr:`text`\n\n :return: the value of the content depending on `:meth:`content_type`\n \"\"\"\n attr = self.content_type\n if attr:\n return getattr(self, attr)\n\n @content.setter\n def content(self, value: DocumentContentType):\n \"\"\"Set the content of the document. It assigns the value to field with the right type.\n\n .. 
seealso::\n :attr:`blob`, :attr:`buffer`, :attr:`text`\n\n :param value: the value from which to set the content of the Document\n \"\"\"\n if isinstance(value, bytes):\n self.buffer = value\n elif isinstance(value, str):\n if _is_uri(value):\n self.uri = value\n else:\n self.text = value\n elif isinstance(value, np.ndarray):\n self.blob = value\n else:\n try:\n # try to set blob to `sparse` without needing to import all the `scipy` sparse requirements\n self.blob = value\n except:\n # ``None`` is also considered as bad type\n raise TypeError(f'{typename(value)} is not recognizable')\n\n @property\n def granularity(self):\n \"\"\"Return the granularity of the document.\n\n :return: the granularity from this document proto\n \"\"\"\n return self._pb_body.granularity\n\n @granularity.setter\n def granularity(self, value: int):\n \"\"\"Set the granularity of the document.\n\n :param value: the value of the granularity to be set\n \"\"\"\n self._pb_body.granularity = value\n\n @property\n def adjacency(self):\n \"\"\"Return the adjacency of the document.\n\n :return: the adjacency from this document proto\n \"\"\"\n return self._pb_body.adjacency\n\n @adjacency.setter\n def adjacency(self, value: int):\n \"\"\"Set the adjacency of the document.\n\n :param value: the value of the adjacency to be set\n \"\"\"\n self._pb_body.adjacency = value\n\n @property\n def scores(self):\n \"\"\"Return the scores of the document.\n\n :return: the scores attached to this document as `:class:NamedScoreMapping`\n \"\"\"\n return NamedScoreMapping(self._pb_body.scores)\n\n @scores.setter\n def scores(\n self,\n value: Dict[\n str, Union[NamedScore, jina_pb2.NamedScoreProto, float, np.generic]\n ],\n ):\n \"\"\"Sets the scores of the `Document`. Specially important to provide the ability to start `scores` as:\n\n .. highlight:: python\n .. code-block:: python\n\n from jina import Document\n from jina.types.score import NamedScore\n d = Document(scores={'euclidean': 5, 'cosine': NamedScore(value=0.5)})\n\n :param value: the dictionary to set the scores\n \"\"\"\n scores = NamedScoreMapping(self._pb_body.scores)\n for k, v in value.items():\n scores[k] = v\n\n @property\n def evaluations(self):\n \"\"\"Return the evaluations of the document.\n\n :return: the evaluations attached to this document as `:class:NamedScoreMapping`\n \"\"\"\n return NamedScoreMapping(self._pb_body.evaluations)\n\n @evaluations.setter\n def evaluations(\n self,\n value: Dict[\n str, Union[NamedScore, jina_pb2.NamedScoreProto, float, np.generic]\n ],\n ):\n \"\"\"Sets the evaluations of the `Document`. Specially important to provide the ability to start `evaluations` as:\n\n .. highlight:: python\n .. 
code-block:: python\n\n from jina import Document\n from jina.types.score import NamedScore\n d = Document(evaluations={'precision': 0.9, 'recall': NamedScore(value=0.5)})\n\n :param value: the dictionary to set the evaluations\n \"\"\"\n scores = NamedScoreMapping(self._pb_body.evaluations)\n for k, v in value.items():\n scores[k] = v\n\n def convert_image_buffer_to_blob(self, color_axis: int = -1):\n \"\"\"Convert an image buffer to blob\n\n :param color_axis: the axis id of the color channel, ``-1`` indicates the color channel info at the last axis\n \"\"\"\n self.blob = to_image_blob(io.BytesIO(self.buffer), color_axis)\n\n def convert_image_blob_to_uri(\n self, width: int, height: int, resize_method: str = 'BILINEAR'\n ):\n \"\"\"Assuming :attr:`blob` is a _valid_ image, set :attr:`uri` accordingly\n :param width: the width of the blob\n :param height: the height of the blob\n :param resize_method: the resize method name\n \"\"\"\n png_bytes = png_to_buffer(self.blob, width, height, resize_method)\n self.uri = 'data:image/png;base64,' + base64.b64encode(png_bytes).decode()\n\n def convert_image_uri_to_blob(\n self, color_axis: int = -1, uri_prefix: Optional[str] = None\n ):\n \"\"\"Convert uri to blob\n\n :param color_axis: the axis id of the color channel, ``-1`` indicates the color channel info at the last axis\n :param uri_prefix: the prefix of the uri\n \"\"\"\n self.blob = to_image_blob(\n (uri_prefix + self.uri) if uri_prefix else self.uri, color_axis\n )\n\n def convert_image_datauri_to_blob(self, color_axis: int = -1):\n \"\"\"Convert data URI to image blob\n\n :param color_axis: the axis id of the color channel, ``-1`` indicates the color channel info at the last axis\n \"\"\"\n req = urllib.request.Request(self.uri, headers={'User-Agent': 'Mozilla/5.0'})\n with urllib.request.urlopen(req) as fp:\n buffer = fp.read()\n self.blob = to_image_blob(io.BytesIO(buffer), color_axis)\n\n def convert_buffer_to_blob(self, dtype=None, count=-1, offset=0):\n \"\"\"Assuming the :attr:`buffer` is a _valid_ buffer of Numpy ndarray,\n set :attr:`blob` accordingly.\n\n :param dtype: Data-type of the returned array; default: float.\n :param count: Number of items to read. ``-1`` means all data in the buffer.\n :param offset: Start reading the buffer from this offset (in bytes); default: 0.\n\n .. note::\n One can only recover values not shape information from pure buffer.\n \"\"\"\n self.blob = np.frombuffer(self.buffer, dtype, count, offset)\n\n def convert_blob_to_buffer(self):\n \"\"\"Convert blob to buffer\"\"\"\n self.buffer = self.blob.tobytes()\n\n def convert_uri_to_buffer(self):\n \"\"\"Convert uri to buffer\n Internally it downloads from the URI and set :attr:`buffer`.\n\n \"\"\"\n if urllib.parse.urlparse(self.uri).scheme in {'http', 'https', 'data'}:\n req = urllib.request.Request(\n self.uri, headers={'User-Agent': 'Mozilla/5.0'}\n )\n with urllib.request.urlopen(req) as fp:\n self.buffer = fp.read()\n elif os.path.exists(self.uri):\n with open(self.uri, 'rb') as fp:\n self.buffer = fp.read()\n else:\n raise FileNotFoundError(f'{self.uri} is not a URL or a valid local path')\n\n def convert_uri_to_datauri(self, charset: str = 'utf-8', base64: bool = False):\n \"\"\"Convert uri to data uri.\n Internally it reads uri into buffer and convert it to data uri\n\n :param charset: charset may be any character set registered with IANA\n :param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit. 
Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.\n \"\"\"\n if not _is_datauri(self.uri):\n self.convert_uri_to_buffer()\n self.uri = to_datauri(\n self.mime_type, self.buffer, charset, base64, binary=True\n )\n\n def convert_buffer_to_uri(self, charset: str = 'utf-8', base64: bool = False):\n \"\"\"Convert buffer to data uri.\n Internally it first reads into buffer and then converts it to data URI.\n\n :param charset: charset may be any character set registered with IANA\n :param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.\n Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that\n frequently uses non-US-ASCII characters.\n \"\"\"\n\n if not self.mime_type:\n raise ValueError(\n f'{self.mime_type} is unset, can not convert it to data uri'\n )\n\n self.uri = to_datauri(self.mime_type, self.buffer, charset, base64, binary=True)\n\n def convert_text_to_uri(self, charset: str = 'utf-8', base64: bool = False):\n \"\"\"Convert text to data uri.\n\n :param charset: charset may be any character set registered with IANA\n :param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.\n Designed to be efficient for non-text 8 bit and binary data.\n Sometimes used for text data that frequently uses non-US-ASCII characters.\n \"\"\"\n\n self.uri = to_datauri(self.mime_type, self.text, charset, base64, binary=False)\n\n def convert_uri_to_text(self):\n \"\"\"Assuming URI is text, convert it to text\"\"\"\n self.convert_uri_to_buffer()\n self.text = self.buffer.decode()\n\n def convert_content_to_uri(self):\n \"\"\"Convert content in URI with best effort\"\"\"\n if self.text:\n self.convert_text_to_uri()\n elif self.buffer:\n self.convert_buffer_to_uri()\n elif self.content_type:\n raise NotImplementedError\n\n def MergeFrom(self, doc: 'Document'):\n \"\"\"Merge the content of target\n\n :param doc: the document to merge from\n \"\"\"\n self._pb_body.MergeFrom(doc.proto)\n\n def CopyFrom(self, doc: 'Document'):\n \"\"\"Copy the content of target\n\n :param doc: the document to copy from\n \"\"\"\n self._pb_body.CopyFrom(doc.proto)\n\n def __mermaid_str__(self):\n results = []\n from google.protobuf.json_format import MessageToDict\n\n content = MessageToDict(self._pb_body, preserving_proto_field_name=True)\n\n _id = f'{self._mermaid_id[:3]}~Document~'\n\n for idx, c in enumerate(self.chunks):\n results.append(\n f'{_id} --> \"{idx + 1}/{len(self.chunks)}\" {c._mermaid_id[:3]}~Document~: chunks'\n )\n results.append(c.__mermaid_str__())\n\n for idx, c in enumerate(self.matches):\n results.append(\n f'{_id} ..> \"{idx + 1}/{len(self.matches)}\" {c._mermaid_id[:3]}~Document~: matches'\n )\n results.append(c.__mermaid_str__())\n if 'chunks' in content:\n content.pop('chunks')\n if 'matches' in content:\n content.pop('matches')\n if content:\n results.append(f'class {_id}{{')\n for k, v in content.items():\n if isinstance(v, (str, int, float, bytes)):\n results.append(f'+{k} {str(v)[:10]}')\n else:\n results.append(f'+{k}({type(getattr(self, k, v))})')\n results.append('}')\n\n return '\\n'.join(results)\n\n def _mermaid_to_url(self, img_type: str) -> str:\n \"\"\"\n Rendering the current flow as a url points to a SVG, it needs internet connection\n\n :param img_type: the type of image to be generated\n :return: the url pointing to a SVG\n \"\"\"\n if img_type == 'jpg':\n img_type = 'img'\n\n 
mermaid_str = (\n \"\"\"\n %%{init: {'theme': 'base', 'themeVariables': { 'primaryColor': '#FFC666'}}}%%\n classDiagram\n\n \"\"\"\n + self.__mermaid_str__()\n )\n\n encoded_str = base64.b64encode(bytes(mermaid_str.strip(), 'utf-8')).decode(\n 'utf-8'\n )\n\n return f'https://mermaid.ink/{img_type}/{encoded_str}'\n\n def _ipython_display_(self):\n \"\"\"Displays the object in IPython as a side effect\"\"\"\n self.plot(inline_display=True)\n\n def plot(self, output: Optional[str] = None, inline_display: bool = False) -> None:\n \"\"\"\n Visualize the Document recursively.\n\n :param output: a filename specifying the name of the image to be created,\n the suffix svg/jpg determines the file type of the output image\n :param inline_display: show image directly inside the Jupyter Notebook\n \"\"\"\n image_type = 'svg'\n if output and output.endswith('jpg'):\n image_type = 'jpg'\n\n url = self._mermaid_to_url(image_type)\n showed = False\n if inline_display:\n try:\n from IPython.display import display, Image\n\n display(Image(url=url))\n showed = True\n except:\n # no need to panic users\n pass\n\n if output:\n download_mermaid_url(url, output)\n elif not showed:\n from jina.logging.predefined import default_logger\n\n default_logger.info(f'Document visualization: {url}')\n\n def _prettify_doc_dict(self, d: Dict):\n \"\"\"Changes recursively a dictionary to show nd array fields as lists of values\n\n :param d: the dictionary to prettify\n \"\"\"\n for key in _all_doc_array_keys:\n if key in d:\n value = getattr(self, key)\n if isinstance(value, np.ndarray):\n d[key] = value.tolist()\n if 'chunks' in d:\n for chunk_doc, chunk_dict in zip(self.chunks, d['chunks']):\n chunk_doc._prettify_doc_dict(chunk_dict)\n if 'matches' in d:\n for match_doc, match_dict in zip(self.matches, d['matches']):\n match_doc._prettify_doc_dict(match_dict)\n\n def dict(self, prettify_ndarrays=False, *args, **kwargs):\n \"\"\"Return the object in Python dictionary\n\n :param prettify_ndarrays: boolean indicating if the ndarrays need to be prettified to be shown as lists of values\n :param args: Extra positional arguments\n :param kwargs: Extra keyword arguments\n :return: dict representation of the object\n \"\"\"\n d = super().dict(*args, **kwargs)\n if prettify_ndarrays:\n self._prettify_doc_dict(d)\n return d\n\n def json(self, prettify_ndarrays=False, *args, **kwargs):\n \"\"\"Return the object in JSON string\n\n :param prettify_ndarrays: boolean indicating if the ndarrays need to be prettified to be shown as lists of values\n :param args: Extra positional arguments\n :param kwargs: Extra keyword arguments\n :return: JSON string of the object\n \"\"\"\n if prettify_ndarrays:\n import json\n\n d = super().dict(*args, **kwargs)\n self._prettify_doc_dict(d)\n return json.dumps(d, sort_keys=True, **kwargs)\n else:\n return super().json(*args, **kwargs)\n\n @property\n def non_empty_fields(self) -> Tuple[str]:\n \"\"\"Return the set fields of the current document that are not empty\n\n :return: the tuple of non-empty fields\n \"\"\"\n return tuple(field[0].name for field in self.ListFields())\n\n @staticmethod\n def attributes(\n include_proto_fields: bool = True,\n include_proto_fields_camelcase: bool = False,\n include_properties: bool = False,\n ) -> List[str]:\n \"\"\"Return all attributes supported by the Document, which can be accessed by ``doc.attribute``\n\n :param include_proto_fields: if set, then include all protobuf fields\n :param include_proto_fields_camelcase: if set, then include all protobuf fields in 
CamelCase\n :param include_properties: if set, then include all properties defined for Document class\n :return: a list of attributes in string.\n \"\"\"\n import inspect\n\n support_keys = []\n\n if include_proto_fields:\n support_keys = list(jina_pb2.DocumentProto().DESCRIPTOR.fields_by_name)\n if include_proto_fields_camelcase:\n support_keys += list(\n jina_pb2.DocumentProto().DESCRIPTOR.fields_by_camelcase_name\n )\n\n if include_properties:\n support_keys += [\n name\n for (name, value) in inspect.getmembers(\n Document, lambda x: isinstance(x, property)\n )\n ]\n return list(set(support_keys))\n\n def __getattr__(self, item):\n if hasattr(self._pb_body, item):\n value = getattr(self._pb_body, item)\n else:\n value = dunder_get(self._pb_body, item)\n return value\n\n\ndef _is_uri(value: str) -> bool:\n scheme = urllib.parse.urlparse(value).scheme\n return (\n (scheme in {'http', 'https'})\n or (scheme in {'data'})\n or os.path.exists(value)\n or os.access(os.path.dirname(value), os.W_OK)\n )\n\n\ndef _is_datauri(value: str) -> bool:\n scheme = urllib.parse.urlparse(value).scheme\n return scheme in {'data'}\n\n\ndef _contains_conflicting_content(**kwargs):\n content_keys = 0\n for k in kwargs.keys():\n if k in _all_doc_content_keys:\n content_keys += 1\n if content_keys > 1:\n return True\n\n return False\n"
] |
[
[
"numpy.frombuffer",
"numpy.array",
"scipy.sparse.issparse"
]
] |
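A standalone note on the `convert_blob_to_buffer` / `convert_buffer_to_blob` pair defined in the file above: `tobytes()` flattens the array, so `np.frombuffer` can only recover the values, never the shape, exactly as the docstring warns. A quick sketch, with dtype and shape chosen for illustration since a raw buffer carries neither:

```python
import numpy as np

# Forward direction: Document.convert_blob_to_buffer stores blob.tobytes().
blob = np.random.random([10, 5])          # float64 by default
buffer = blob.tobytes()

# Reverse direction: convert_buffer_to_blob calls np.frombuffer(buffer, dtype,
# count, offset). The result is flat; shape must be reapplied by the caller.
restored = np.frombuffer(buffer, dtype=np.float64)
print(restored.shape)                     # (50,)
assert np.array_equal(restored.reshape(10, 5), blob)
```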
williamFalcon/theano-deep-neural-net
|
[
"94b7cbe56d504c94ed8851a01d6ed7b2b82029e5"
] |
[
"dnn_demo.py"
] |
[
"\nimport sys\nimport os\nfrom dnn.dnn import MLP\nimport pandas as pd\nimport numpy as np\n\n\ndef load_dataset():\n data_path = os.path.dirname(os.path.realpath(__file__)) + '/db/train.csv'\n train_df = pd.read_csv(data_path)\n\n # extract the x and y for the models\n # format for the nn\n X = train_df.values[:, 1:] / np.float32(256)\n Y = train_df.values[:, 0].astype(np.int32)\n\n val_size = 10000\n tng_end = (.80 * len(X)) - val_size\n val_end = tng_end + val_size\n\n # tng data 70%\n X_train = X[0:tng_end]\n y_train = Y[0:tng_end]\n\n # validation\n # 20%\n X_val = X[tng_end: val_end]\n y_val = Y[tng_end: val_end]\n\n # test accuracy of classifier\n # 10%\n X_test = X[val_end:]\n y_test = Y[val_end:]\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\ndef run_demo():\n # Load the dataset\n print(\"Loading data...\")\n X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()\n\n # images are 28*28 pixels. Shown as a vector of 28*28 length\n nn = MLP(input_dim_count=28*28, output_size=10)\n nn.fit(X_train, y_train, X_val, y_val, X_test, y_test, epochs=50)\n\n # predict on first 5 of test\n x_preds = X_test[0:5]\n ans = nn.predict(x_preds)\n\n # print prediction results\n print(ans)\n print(y_test[0:5])\n\n\nif __name__ == '__main__':\n run_demo()"
] |
[
[
"pandas.read_csv",
"numpy.float32"
]
] |
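The loader above scales pixels by `np.float32(256)` and carves a fixed-size validation window out of the training frame; note that the split indices must be integers for Python 3 slicing, hence the `int()` around the 80% cutoff. The same split, sketched against synthetic data so it runs without `db/train.csv` or the `dnn` package:

```python
import numpy as np

# Synthetic stand-in for the Kaggle-style train.csv: label in column 0,
# 28*28 flattened pixels in the remaining columns.
data = np.random.randint(0, 256, size=(42000, 1 + 28 * 28))

X = data[:, 1:] / np.float32(256)   # scale pixels into [0, 1)
Y = data[:, 0].astype(np.int32)

val_size = 10000
tng_end = int(0.80 * len(X)) - val_size   # int() keeps the slice bounds integral
val_end = tng_end + val_size

X_train, y_train = X[:tng_end], Y[:tng_end]
X_val, y_val = X[tng_end:val_end], Y[tng_end:val_end]
X_test, y_test = X[val_end:], Y[val_end:]

print(len(X_train), len(X_val), len(X_test))
```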
m95music/yukarin
|
[
"87e4e813e1b846720ef7a89162edf1c379700619"
] |
[
"scripts/check_silence.py"
] |
[
"\"\"\"\ncheck silence of wave.\n\"\"\"\n\nimport argparse\nimport glob\nimport math\nimport multiprocessing\nfrom pathlib import Path\nfrom pprint import pprint\n\nimport matplotlib.pyplot as plt\nimport numpy\nimport tqdm\nfrom sprocket.speech import FeatureExtractor\n\nfrom yukarin import Wave\nfrom yukarin.param import AcousticParam\nfrom yukarin.utility.sprocket_utility import SpeakerYML\nfrom yukarin.utility.sprocket_utility import low_cut_filter\n\nbase_acoustic_param = AcousticParam()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input_wave_glob', '-i')\nparser.add_argument('--candidate_threshold', '-th', nargs='+', type=float)\nparser.add_argument('--output_image', '-o', type=Path)\nparser.add_argument('--speaker_yml', type=Path)\nparser.add_argument('--pad_second', type=float, default=base_acoustic_param.pad_second)\narguments = parser.parse_args()\n\n# read parameters from speaker yml\nsconf = SpeakerYML(arguments.speaker_yml)\n\n# constract FeatureExtractor class\nfeat = FeatureExtractor(\n analyzer=sconf.analyzer,\n fs=sconf.wav_fs,\n fftl=sconf.wav_fftl,\n shiftms=sconf.wav_shiftms,\n minf0=sconf.f0_minf0,\n maxf0=sconf.f0_maxf0,\n)\n\n\ndef calc_score(path: Path):\n scores = []\n\n wave = Wave.load(path=path, sampling_rate=sconf.wav_fs)\n wave = wave.pad(pre_second=arguments.pad_second, post_second=arguments.pad_second)\n\n hop = sconf.wav_fs * sconf.wav_shiftms // 1000\n length = int(math.ceil(len(wave.wave) / hop + 0.0001))\n\n # for sprocket\n x = low_cut_filter(wave.wave, wave.sampling_rate, cutoff=70)\n feat.analyze(x)\n npow = feat.npow()\n effective1: numpy.ndarray = (npow > sconf.power_threshold)\n assert len(effective1) == length, str(path)\n\n # for yukarin\n for th in arguments.candidate_threshold:\n effective2 = wave.get_effective_frame(\n threshold_db=th,\n fft_length=sconf.wav_fftl,\n frame_period=sconf.wav_shiftms,\n )\n scores.append([\n (effective1 == effective2).sum(),\n (effective1 == effective2)[effective1].sum(),\n (effective1 == effective2)[~effective1].sum(),\n length,\n ])\n\n return scores\n\n\ndef main():\n pprint(vars(arguments))\n\n paths = [Path(p) for p in sorted(glob.glob(arguments.input_wave_glob))]\n pool = multiprocessing.Pool()\n it = pool.imap(calc_score, paths)\n scores_list = list(tqdm.tqdm(it, total=len(paths)))\n\n pprint({\n th: score\n for th, score in zip(arguments.candidate_threshold, numpy.array(scores_list).sum(axis=0))\n })\n\n fig = plt.figure(figsize=[10, 5])\n plt.plot(arguments.candidate_threshold, numpy.array(scores_list).sum(axis=0))\n fig.savefig(arguments.output_image)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array",
"matplotlib.pyplot.figure"
]
] |
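`calc_score` above returns one `[matches, matches_on_voiced, matches_on_silent, length]` row per candidate threshold, and `main` aggregates the per-file results with `numpy.array(scores_list).sum(axis=0)`. A sketch of that aggregation and plot, with fabricated counters standing in for the real wave analysis:

```python
import numpy as np
import matplotlib.pyplot as plt

thresholds = [-40.0, -35.0, -30.0]          # stand-ins for --candidate_threshold

# One (n_thresholds, 4) score block per input wave, as calc_score returns.
scores_list = [np.random.randint(0, 100, size=(len(thresholds), 4))
               for _ in range(5)]

# Summing over axis 0 collapses the per-file dimension, leaving one aggregate
# row of four counters per candidate threshold.
totals = np.array(scores_list).sum(axis=0)

fig = plt.figure(figsize=[10, 5])
plt.plot(thresholds, totals)                # four lines, one per counter
fig.savefig('check_silence_scores.png')
```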
jgritans/torrent_finder
|
[
"4270ebab208727fe5ea559b97ed74b25ea2d37d3"
] |
[
"torrents.py"
] |
[
"import pandas as pd\nfrom web import soup\n\n\n\n\nclass EmptyTable(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass FailedDfCreation(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass HardToFixBetterSkip(Exception):\n def __init__(self, message):\n self.message = message\n\n\ndef get_torrent_table(movie, res_kwds, criteria):\n try:\n sizes = {\n 'M': 1,\n 'G': 1000\n }\n\n # get HTML table from 1337x.to\n link = f\"https://1337x.to/category-search/{movie.title} {movie.year}/Movies/1/\".replace(\" \", \"+\")\n\n table_rows = soup(link).table.find_all('tr')\n if len(table_rows) < 2:\n raise Exception\n\n table_rows.pop(0)\n\n l = []\n for tr in table_rows:\n try:\n td = tr.find_all('td')\n row = [cell.text for cell in td] + [f\"https://1337x.to{a['href']}\" for a in tr.find_all(href=True)] +[\"\",\"\",\"\"]\n row.pop(6)\n l.append(row)\n except:\n continue\n\n # Create, clean up and sort downloads_df\n\n downloads_df = pd.DataFrame(l, columns=['Title', 'Se', 'Le', 'Time', 'Size', 'Uploader', 'Item_link',\n 'User_link','Language','Id','Status']).dropna()\n\n downloads_df.Size = [val.replace(',', '').split('B')[0].split(' ') for val in downloads_df.Size]\n downloads_df[['Size', 'Unit']] = downloads_df.Size.tolist()\n downloads_df.Unit = downloads_df.Unit.map(sizes)\n downloads_df.Size = downloads_df.Size.map(float) * downloads_df.Unit\n\n downloads_df.Se = downloads_df.Se.map(int)\n downloads_df.Le = downloads_df.Le.map(int)\n downloads_df.Size = downloads_df.Size.map(int)\n\n downloads_df = downloads_df[downloads_df['Se'] >= criteria['seeders']].sort_values(['Size', 'Se'], ascending=False)\n\n downloads_df['Year'] = movie.year\n # Infer and add resolution to df\n downloads_df['Resolution'] = ''\n for res in res_kwds: # add resolutions to df\n for i in range(len(downloads_df)):\n for keyword in res_kwds[res]:\n if keyword in downloads_df.iloc[i, downloads_df.columns.get_loc('Title')]:\n downloads_df.iloc[i, downloads_df.columns.get_loc('Resolution')] = res\n\n return downloads_df\n\n\n\n except:\n raise EmptyTable\n\n\ndef get_mirror(soup, mirror_site):\n info_hash = next(p.text.split(' ')[-1] for p in soup.find_all('p') if 'hash' in p.text.lower())\n return mirror_site.format(info_hash)\n"
] |
[
[
"pandas.DataFrame"
]
] |
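The pandas work in `get_torrent_table` above is the size normalization: split a `'1.4 GB'`-style string into a numeric value and a unit letter, then map the unit onto a megabyte multiplier. A sketch of the same pattern against hand-written rows, so it runs without scraping 1337x.to:

```python
import pandas as pd

sizes = {'M': 1, 'G': 1000}   # megabyte multiplier per unit letter

# Hand-written stand-ins for scraped result rows.
rows = [
    ['Movie A 1080p', '120', '4', 'Jan. 1st', '1.4 GB', 'up1'],
    ['Movie A 720p', '45', '2', 'Jan. 2nd', '700.0 MB', 'up2'],
]
df = pd.DataFrame(rows, columns=['Title', 'Se', 'Le', 'Time', 'Size', 'Uploader'])

# '1.4 GB' -> ['1.4', 'G']: drop thousands separators, cut at 'B', split off
# the unit letter, then scale the numeric part by the unit's multiplier.
df.Size = [val.replace(',', '').split('B')[0].split(' ') for val in df.Size]
df[['Size', 'Unit']] = df.Size.tolist()
df.Size = df.Size.map(float) * df.Unit.map(sizes)
df.Se = df.Se.map(int)

print(df.sort_values(['Size', 'Se'], ascending=False))
```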
kshi1126/CarND-Traffic-Sign-Classifier-Project
|
[
"7d96575933944309c6de0edc00ac9f03b077c0e2"
] |
[
"Traffic_Sign_Classifier.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Self-Driving Car Engineer Nanodegree\n# \n# ## Deep Learning\n# \n# ## Project: Build a Traffic Sign Recognition Classifier\n# \n# In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. \n# \n# > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \\n\",\n# \"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. \n# \n# In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.\n# \n# The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains \"Stand Out Suggestions\" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the \"stand out suggestions\", you can include the code in this Ipython notebook and also discuss the results in the writeup file.\n# \n# \n# >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. 
In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.\n\n# ---\n# ## Step 0: Load The Data\n\n# In[1]:\n\n\n# Load pickled data\nimport pickle\nimport matplotlib.pyplot as plt\n# TODO: Fill this in based on where you saved the training and testing data\n\ntraining_file = \"../data/train.p\"\nvalidation_file=\"../data/valid.p\"\ntesting_file = \"../data/test.p\"\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(validation_file, mode='rb') as f:\n valid = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n \nX_train, y_train = train['features'], train['labels']\nX_valid, y_valid = valid['features'], valid['labels']\nX_test, y_test = test['features'], test['labels']\n\n\n# In[2]:\n\n\n#plt.imshow(X_train[1]) \n\n#print(y_train[1])\nall_image_list=[]\nfor i in range(43):\n for j in range(len(y_train)):\n if i == y_train[j]:\n #print(\"fount\"+str(i))\n all_image_list.append(X_train[j])\n break\nprint(len(all_image_list))\n#plt.imshow(all_image_list[0])\nfig, axs = plt.subplots(3, 5, figsize=(24, 9))\nfig.tight_layout()\n\nfor i in range(3):\n for j in range(5):\n axs[i][j].imshow(all_image_list[j+5*i])\n axs[i][j].set_title(j+5*i, fontsize=30)\n\n\n# In[3]:\n\n\nfig, axs = plt.subplots(3, 5, figsize=(24, 9))\nfig.tight_layout()\n\nfor i in range(3):\n for j in range(5):\n axs[i][j].imshow(all_image_list[15+j+5*i])\n axs[i][j].set_title(15+j+5*i, fontsize=30)\n\n\n# In[4]:\n\n\nfig, axs = plt.subplots(3, 5, figsize=(24, 9))\nfig.tight_layout()\n\nfor i in range(3):\n for j in range(5):\n if(30+j+5*i < 43):\n axs[i][j].imshow(all_image_list[30+j+5*i])\n axs[i][j].set_title(30+j+5*i, fontsize=30)\n\n\n# ---\n# \n# ## Step 1: Dataset Summary & Exploration\n# \n# The pickled data is a dictionary with 4 key/value pairs:\n# \n# - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).\n# - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.\n# - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.\n# - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**\n# \n# Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. \n\n# ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas\n\n# In[5]:\n\n\n### Replace each question mark with the appropriate value. 
\n### Use python, pandas or numpy methods rather than hard coding the results\n\n# TODO: Number of training examples\nn_train = len(X_train)\n\n# TODO: Number of validation examples\nn_validation = len(X_valid)\n\n# TODO: Number of testing examples.\nn_test = len(X_test)\n\n# TODO: What's the shape of an traffic sign image?\nimage_shape = X_train[0].shape\n\n# TODO: How many unique classes/labels there are in the dataset.\nn_classes = set(y_train)\n\nprint(\"Number of training examples =\", n_train)\nprint(\"Number of validation examples =\", n_validation)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image data shape =\", image_shape)\nprint(\"Number of classes =\", n_classes)\n\ndemo_image = X_train[400]\nplt.imshow(demo_image)\n\n\n# ### Include an exploratory visualization of the dataset\n\n# Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. \n# \n# The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.\n# \n# **NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?\n\n# In[6]:\n\n\n### Data exploration visualization code goes here.\n### Feel free to use as many code cells as needed.\n\n# Visualizations will be shown in the notebook.\nget_ipython().run_line_magic('matplotlib', 'inline')\n\ny_train_list = [0] * 43\ny_valid_list = [0] * 43\ny_test_list = [0] * 43\nnumber_class = list(range(0, 43))\n#print(number_class)\n\nfor i in y_train:\n y_train_list[i] += 1\nfor i in y_valid:\n y_valid_list[i] += 1\nfor i in y_test:\n y_test_list[i] += 1\n\n#plt.subplot(3, 1, 1)\nplt.bar(number_class, y_train_list)\nplt.xlabel(\"Traffic Sign Type for Training Data\")\nplt.ylabel(\"Numbers\")\n\n\n# In[7]:\n\n\n#plt.subplot(3, 1, 2)\nplt.bar(number_class, y_valid_list)\nplt.xlabel(\"Traffic Sign Type for Validation Data\")\nplt.ylabel(\"Numbers\")\n\n\n# In[8]:\n\n\n#plt.subplot(3, 1, 3)\nplt.bar(number_class, y_test_list)\nplt.xlabel(\"Traffic Sign Type for Testing Data\")\nplt.ylabel(\"Numbers\")\n\n\n# ----\n# \n# ## Step 2: Design and Test a Model Architecture\n# \n# Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).\n# \n# The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! \n# \n# With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. 
\n# \n# There are various aspects to consider when thinking about this problem:\n# \n# - Neural network architecture (is the network over or underfitting?)\n# - Play around preprocessing techniques (normalization, rgb to grayscale, etc)\n# - Number of examples per label (some have more than others).\n# - Generate fake data.\n# \n# Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.\n\n# ### Pre-process the Data Set (normalization, grayscale, etc.)\n\n# Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project. \n# \n# Other pre-processing steps are optional. You can try different techniques to see if it improves performance. \n# \n# Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.\n\n# In[9]:\n\n\nfrom sklearn.utils import shuffle\nfrom skimage.color import rgb2gray\nimport numpy as np\n\ndef normalize(image):\n return (image.astype(float) - 128) / 128\n\n\ndef grayscale(image):\n gray = rgb2gray(image)\n return np.expand_dims(gray, axis=3)\n\n\n\nX_train_pre = []\nfor image in X_train:\n image = normalize(image)\n image = grayscale(image)\n X_train_pre.append(image)\nX_train = X_train_pre\n\nX_valid_pre = []\nfor image in X_valid:\n image = normalize(image)\n image = grayscale(image)\n X_valid_pre.append(image)\nX_valid = X_valid_pre\n\nX_test_pre = []\nfor image in X_test:\n image = normalize(image)\n image = grayscale(image)\n X_test_pre.append(image)\nX_test = X_test_pre\n\n\nprint(X_train_pre[1].shape)\n#X_train_pre, y_train = shuffle(X_train_pre, y_train)\n\n\n# ### Model Architecture\n\n# In[10]:\n\n\n### Define your architecture here.\n### Feel free to use as many code cells as needed.\nimport tensorflow as tf\n\nEPOCHS = 100\nBATCH_SIZE = 128 #128\n\n\n# In[11]:\n\n\nfrom tensorflow.contrib.layers import flatten\n\ndef LeNet(x): \n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n mu = 0\n sigma = 0.1 #0.1\n \n # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.\n conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))\n conv1_b = tf.Variable(tf.zeros(6))\n conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b\n\n # Activation.\n conv1 = tf.nn.relu(conv1)\n\n # Pooling. Input = 28x28x6. Output = 14x14x6.\n conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # Layer 2: Convolutional. Output = 10x10x16.\n conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))\n conv2_b = tf.Variable(tf.zeros(16))\n conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b\n \n # Activation.\n conv2 = tf.nn.relu(conv2)\n\n # Pooling. Input = 10x10x16. Output = 5x5x16.\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # Flatten. Input = 5x5x16. Output = 400.\n fc0 = flatten(conv2)\n \n # Layer 3: Fully Connected. Input = 400. 
Output = 200.\n fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 200), mean = mu, stddev = sigma))\n fc1_b = tf.Variable(tf.zeros(200))\n fc1 = tf.matmul(fc0, fc1_W) + fc1_b\n \n # Activation.\n fc1 = tf.nn.relu(fc1)\n\n # Layer 4: Fully Connected. Input = 120. Output = 84.\n fc2_W = tf.Variable(tf.truncated_normal(shape=(200, 84), mean = mu, stddev = sigma))\n fc2_b = tf.Variable(tf.zeros(84))\n fc2 = tf.matmul(fc1, fc2_W) + fc2_b\n \n # Activation.\n fc2 = tf.nn.relu(fc2)\n\n # Layer 5: Fully Connected. Input = 84. Output = 43.\n fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))\n fc3_b = tf.Variable(tf.zeros(43))\n logits = tf.matmul(fc2, fc3_W) + fc3_b\n \n return logits\n\n\n# In[12]:\n\n\nx = tf.placeholder(tf.float32, (None, 32, 32, 1))\ny = tf.placeholder(tf.int32, (None))\none_hot_y = tf.one_hot(y, 43)\n\n\n# In[13]:\n\n\nrate = 0.001\n\nlogits = LeNet(x)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\ntraining_operation = optimizer.minimize(loss_operation)\n\n\n# In[14]:\n\n\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n\n\n# ### Train, Validate and Test the Model\n\n# A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation\n# sets imply underfitting. 
A high accuracy on the training set but low accuracy on the validation set implies overfitting.\n\n# In[13]:\n\n\n### Train your model here.\n### Calculate and report the accuracy on the training and validation set.\n### Once a final model architecture is selected, \n### the accuracy on the test set should be calculated and reported as well.\n### Feel free to use as many code cells as needed.\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n \n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n \n validation_accuracy = evaluate(X_valid, y_valid)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n \n saver.save(sess, './lenet')\n print(\"Model saved\")\n\n\n# In[15]:\n\n\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(X_train, y_train)\n print(\"Train Accuracy = {:.3f}\".format(test_accuracy))\n\n\n# In[16]:\n\n\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(X_valid, y_valid)\n print(\"Validation Accuracy = {:.3f}\".format(test_accuracy))\n\n\n# In[17]:\n\n\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(X_test, y_test)\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))\n\n\n# ---\n# \n# ## Step 3: Test a Model on New Images\n# \n# To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.\n# \n# You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.\n\n# ### Load and Output the Images\n\n# In[18]:\n\n\n### Load the images and plot them here.\n### Feel free to use as many code cells as needed.\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport cv2\n\nonline_images = []\n\nfor i in range(1, 6):\n image = mpimg.imread('Online_Picture/'+str(i)+'.jpg')\n image = cv2.resize(image,(32,32))\n online_images.append(image)\n plt.subplot(1, 5, i)\n plt.imshow(image) \n\n\n# ### Predict the Sign Type for Each Image\n\n# In[19]:\n\n\n### Run the predictions here and use the model to output the prediction for each image.\n### Make sure to pre-process the images with the same pre-processing pipeline used earlier.\n### Feel free to use as many code cells as needed.\nX_online_images_pre = []\nfor image in online_images:\n image = normalize(image)\n image = grayscale(image)\n X_online_images_pre.append(image)\nX_online_images = X_online_images_pre\ny_online_images = [26, 23, 31, 9, 3]\n\n\n# ### Analyze Performance\n\n# In[20]:\n\n\n### Calculate the accuracy for these 5 new images. 
\n### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.\nwith tf.Session() as sess:\n    saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n    test_accuracy = evaluate(X_online_images, y_online_images)\n    print(\"Test Accuracy = {:.3f}\".format(test_accuracy))\n\n\n# ### Output Top 5 Softmax Probabilities For Each Image Found on the Web\n\n# For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here. \n# \n# The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.\n# \n# `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.\n# \n# Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:\n# \n# ```\n# # (5, 6) array\n# a = np.array([[ 0.24879643,  0.07032244,  0.12641572,  0.34763842,  0.07893497,\n#          0.12789202],\n#        [ 0.28086119,  0.27569815,  0.08594638,  0.0178669 ,  0.18063401,\n#          0.15899337],\n#        [ 0.26076848,  0.23664738,  0.08020603,  0.07001922,  0.1134371 ,\n#          0.23892179],\n#        [ 0.11943333,  0.29198961,  0.02605103,  0.26234032,  0.1351348 ,\n#          0.16505091],\n#        [ 0.09561176,  0.34396535,  0.0643941 ,  0.16240774,  0.24206137,\n#          0.09155967]])\n# ```\n# \n# Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:\n# \n# ```\n# TopKV2(values=array([[ 0.34763842,  0.24879643,  0.12789202],\n#        [ 0.28086119,  0.27569815,  0.18063401],\n#        [ 0.26076848,  0.23892179,  0.23664738],\n#        [ 0.29198961,  0.26234032,  0.16505091],\n#        [ 0.34396535,  0.24206137,  0.16240774]]), indices=array([[3, 0, 5],\n#        [0, 1, 4],\n#        [0, 5, 1],\n#        [1, 3, 5],\n#        [1, 4, 3]], dtype=int32))\n# ```\n# \n# Looking just at the first row, we get `[ 0.34763842,  0.24879643,  0.12789202]`; you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.\n\n# In[21]:\n\n\n### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. \n### Feel free to use as many code cells as needed.\n\nsoftmax = tf.nn.softmax(logits)\npred = tf.nn.top_k(softmax, 5)\n\nwith tf.Session() as sess:\n    saver.restore(sess, tf.train.latest_checkpoint('.'))\n    probability = sess.run(pred, feed_dict={x: X_online_images})\n    print(probability)\n\n\n# ### Project Writeup\n# \n# Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file. \n\n# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the IPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. 
Include the finished document along with this notebook as your submission.\n\n# ---\n# \n# ## Step 4 (Optional): Visualize the Neural Network's State with Test Images\n# \n# This section is not required to complete, but it acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device, they are often referred to as a black box. We can better understand what the weights of a neural network look like by plotting their feature maps. After successfully training your neural network, you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimulus image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.\n# \n# Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimulus image, one used during training or a new one you provide, and then the tensorflow variable name that represents the layer's state during the training process. For instance, if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for its second convolutional layer, you could enter conv2 as the tf_activation variable.\n# \n# For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. 
Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.\n# \n# <figure>\n#  <img src=\"visualize_cnn.png\" width=\"380\" alt=\"Combined Image\" />\n#  <figcaption>\n#  <p></p> \n#  <p style=\"text-align: center;\"> Your output should look something like this (above)</p> \n#  </figcaption>\n# </figure>\n#  <p></p> \n# \n\n# In[6]:\n\n\n### Visualize your network's feature maps here.\n### Feel free to use as many code cells as needed.\n\n# image_input: the test image being fed into the network to produce the feature maps\n# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer\n# activation_min/max: can be used to view the activation contrast in more detail; by default matplotlib sets min and max to the actual min and max values of the output\n# plt_num: used to plot multiple different weight feature map sets on the same block; just extend the plt number for each new feature map entry\n\ndef outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):\n    # Here make sure to preprocess your image_input in a way your network expects,\n    # with size, normalization, etc. if needed\n    # image_input =\n    # Note: x should be the same name as your network's tensorflow data placeholder variable\n    # If you get an error that tf_activation is not defined, it may be having trouble accessing the variable from inside a function\n    activation = tf_activation.eval(session=sess, feed_dict={x: image_input})\n    featuremaps = activation.shape[3]\n    plt.figure(plt_num, figsize=(15, 15))\n    for featuremap in range(featuremaps):\n        plt.subplot(6, 8, featuremap + 1)  # sets the number of feature maps to show on each row and column\n        plt.title('FeatureMap ' + str(featuremap))  # displays the feature map number\n        # Use 'and' here: the original '&' is a bitwise operator that binds more\n        # tightly than '!=', so 'a != -1 & b != -1' did not test both bounds.\n        if activation_min != -1 and activation_max != -1:\n            plt.imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmin=activation_min, vmax=activation_max, cmap=\"gray\")\n        elif activation_max != -1:\n            plt.imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmax=activation_max, cmap=\"gray\")\n        elif activation_min != -1:\n            plt.imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmin=activation_min, cmap=\"gray\")\n        else:\n            plt.imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", cmap=\"gray\")\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.imshow",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"numpy.expand_dims",
"tensorflow.zeros",
"tensorflow.nn.max_pool",
"tensorflow.cast",
"tensorflow.contrib.layers.flatten",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv2d",
"tensorflow.nn.top_k",
"matplotlib.pyplot.subplot",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.argmax",
"matplotlib.pyplot.figure",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.one_hot",
"matplotlib.pyplot.ylabel",
"tensorflow.get_default_session",
"tensorflow.nn.relu",
"tensorflow.nn.softmax",
"tensorflow.train.latest_checkpoint",
"tensorflow.reduce_mean",
"sklearn.utils.shuffle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel"
]
] |
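The notebook row above normalizes images with `(pixel - 128) / 128` and converts to grayscale before training. A minimal self-contained sketch of that pipeline, assuming only NumPy and scikit-image (the random image is a stand-in for a real 32x32 traffic-sign crop):

    import numpy as np
    from skimage.color import rgb2gray

    def normalize(image):
        # Quick approximate normalization: maps [0, 255] to [-1, 1).
        return (image.astype(float) - 128) / 128

    def grayscale(image):
        # rgb2gray returns (H, W); add a channel axis for the conv net.
        return np.expand_dims(rgb2gray(image), axis=2)

    rgb = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
    pre = grayscale(normalize(rgb))
    print(pre.shape)              # (32, 32, 1)
    print(pre.min(), pre.max())   # values stay within [-1, 1]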
sadmanca/soccerbot
|
[
"5e60eacb51ff1b063ae8c1caf7eb01053add43eb"
] |
[
"soccer_strategy/src/strategy/dummy_strategy.py"
] |
[
"import math\nimport numpy as np\nimport rospy\n\nimport config as config\nfrom strategy.strategy import Strategy\nfrom soccer_msgs.msg import GameState\nfrom robot import Robot\n\nHAVENT_SEEN_THE_BALL_TIMEOUT = 10\n\nclass DummyStrategy(Strategy):\n\n def __init__(self):\n self.havent_seen_the_ball_timeout = HAVENT_SEEN_THE_BALL_TIMEOUT\n super(DummyStrategy, self).__init__()\n\n # generate goal position\n @staticmethod\n def generate_goal_position(ball, teamcolor, is_first_half, secondaryState):\n is_penalty_shoot = (secondaryState == GameState.STATE_PENALTYSHOOT)\n goal_position = config.position_map_goal(config.GOAL_POSITION, teamcolor, is_first_half, is_penalty_shoot)\n\n if abs(ball.get_position()[1]) < 1.0:\n goal_position[1] = ball.get_position()[1]\n return goal_position\n\n def update_next_strategy(self, friendly, opponent, ball, teamcolor, is_first_half, secondaryState):\n if self.check_ball_avaliable(ball):\n self.havent_seen_the_ball_timeout = HAVENT_SEEN_THE_BALL_TIMEOUT\n\n # generate goal pose\n goal_position = self.generate_goal_position(ball, teamcolor, is_first_half, secondaryState)\n\n if abs(ball.get_position()[1]) < 3.5 and abs(ball.get_position()[0]) < 5:\n\n current_closest = self.who_has_the_ball(friendly, ball) # Guess who has the ball\n if current_closest is not None: #and current_closest.send_nav:\n\n # generate destination pose\n ball_position = ball.get_position()\n player_position = current_closest.get_position()[0:2]\n player_angle = current_closest.get_position()[2]\n\n diff = ball_position - goal_position\n diff_unit = diff / np.linalg.norm(diff)\n diff_angle = math.atan2(-diff_unit[1], -diff_unit[0])\n\n distance_of_player_goal_to_ball = 0.1\n destination_position = ball_position + diff_unit * distance_of_player_goal_to_ball\n\n navigation_bias = 1\n diff = destination_position - player_position\n # nav bias offset nav goal to be behind the ball\n destination_position_biased = player_position + diff * navigation_bias\n\n # nav goal behind the ball\n destination_position_biased = [destination_position_biased[0], destination_position_biased[1], diff_angle]\n\n # print(\"Position of closest player\")\n # print(player_position)\n # print(\"Ball Position\")\n # print(ball_position)\n # print(\"Destination Position\")\n # print(destination_position)\n # print(\"Distance between player and ball\")\n # print(distance_of_player_to_ball)\n\n # difference between robot angle and nav goal angle\n nav_angle__diff = math.atan2(math.sin(player_angle - diff_angle), math.cos(player_angle - diff_angle))\n\n distance_of_player_to_ball = np.linalg.norm(player_position - ball_position)\n if distance_of_player_to_ball < 0.18 and abs(nav_angle__diff) > 0.15:\n print(\"robot ball angle too large, unable to kick \" + str(abs(nav_angle__diff)))\n\n if distance_of_player_to_ball < 0.18 and abs(nav_angle__diff) < 0.15 and current_closest.path.isFinished(current_closest.path_time):\n if nav_angle__diff > 0.03:\n # right foot\n current_closest.kick_with_right_foot = True\n else:\n current_closest.kick_with_right_foot = False\n\n delta = goal_position - ball.get_position()\n unit = delta / np.linalg.norm(delta)\n\n current_closest.status = Robot.Status.KICKING\n current_closest.set_kick_velocity(unit * current_closest.max_kick_speed)\n else:\n current_closest.set_navigation_position(destination_position_biased)\n\n else:\n # If player is not facing the right direction, and not seeing the ball, then face the goal\n self.havent_seen_the_ball_timeout = self.havent_seen_the_ball_timeout - 
1\n            for player in friendly:\n                if player.status != Robot.Status.READY:\n                    continue\n\n                player_angle = player.get_position()[2]\n                player_position = player.get_position()[0:2]\n\n                # Haven't-seen-the-ball timeout: spin the robot to search for the ball\n                if self.havent_seen_the_ball_timeout == 0:\n                    rospy.loginfo(\"Haven't seen the ball for a while. Rotating robot \" + player.robot_name)\n                    self.havent_seen_the_ball_timeout = HAVENT_SEEN_THE_BALL_TIMEOUT\n                    turn_position = [player_position[0], player_position[1], player_angle + math.pi]\n                    player.set_navigation_position(turn_position)\n\n\n        # If the robot is walking and a detected obstacle is in the robot's direction\n        # for player in friendly:\n        #     if player.status == Robot.Status.WALKING:\n        #         player_angle = player.get_position()[2]\n        #         player_position = player.get_position()[0:2]\n        #         player_vector = [math.cos(player_angle), math.sin(player_angle)]\n        #\n        #         obstacles = player.get_detected_obstacles()\n        #\n        #         for obs in obstacles:\n        #             obs_position = np.array(obs[0:2])\n        #\n        #             if np.linalg.norm(obs_position - player_position) > 0.5:\n        #                 continue\n        #\n        #             player_to_ball_vector = obs_position - player_position\n        #             cross = float(np.cross(player_to_ball_vector, player_vector))\n        #\n        #             if abs(cross) > 0.15 * 2:  # TODO tune\n        #                 continue\n        #\n        #             player.terminate_walking_publisher.publish()\n        #             player.status = Robot.Status.READY\n        #             rospy.sleep(0.5)\n        #             player.completed_trajectory_publisher.publish(True)"
] |
[
[
"numpy.linalg.norm"
]
] |
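The strategy above steers the closest robot to a point a fixed distance behind the ball on the ball-to-goal line, facing the goal. A small standalone sketch of that geometry, with made-up positions and the same 0.1 m stand-off the source calls `distance_of_player_goal_to_ball`:

    import math
    import numpy as np

    ball = np.array([2.0, 0.5])
    goal = np.array([4.5, 0.0])

    # Unit vector pointing from the goal toward the ball; stepping along it
    # from the ball places the robot behind the ball, relative to the goal.
    diff = ball - goal
    diff_unit = diff / np.linalg.norm(diff)
    offset = 0.1  # stand-off distance behind the ball (metres, assumed)
    destination = ball + diff_unit * offset

    # Desired heading points back toward the goal, matching the source's
    # math.atan2(-diff_unit[1], -diff_unit[0]) expression.
    heading = math.atan2(-diff_unit[1], -diff_unit[0])
    print(destination, math.degrees(heading))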
X0Leon/pyfin
|
[
"2bdf8ae5f5600838012e1f470a979f3e219df870"
] |
[
"pyfin/data.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport datetime\n\nimport pandas as pd\nimport requests\n\nimport pyfin\nimport pyfin.utils as utils\n\n\ndef get(symbols, provider=None, common_dates=False, forward_fill=True,\n clean_symbols=True, column_names=None, symbol_field_sep=':',\n existing=None, **kwargs):\n \"\"\"\n 获取数据,返回DataFrame\n 参数:\n symbols: list, string, csv string\n provider (function): 下载数据的函数,默认为pyfin.DEFAULT_PROVIDER\n common_dates (bool): 是否保存相同是否,如果是,剔除NaN\n forward_fill (bool): 是否向前填充NaN\n clean_symbols (bool): 使用pyfin.utils.clean_symbols标准化代码\n column_names (list): 列名\n symbol_field_sep (char): symbol和field分隔符,如'600008:open'\n existing (DataFrame): 多数据源下载时用于合并df\n kwargs: 传给provider\n \"\"\"\n\n if provider is None:\n provider = DEFAULT_PROVIDER\n\n symbols = utils.parse_arg(symbols)\n\n data = {}\n for symbol in symbols:\n s = symbol\n f = None\n\n bits = symbol.split(symbol_field_sep, 1)\n if len(bits) == 2:\n s = bits[0]\n f = bits[1]\n\n data[symbol] = provider(s, field=f, **kwargs)\n\n df = pd.DataFrame(data)\n df = df[symbols]\n\n if existing is not None:\n df = pyfin.merge(existing, df)\n\n if common_dates:\n df = df.dropna()\n\n if forward_fill:\n df = df.fillna(method='ffill')\n\n if column_names:\n cnames = utils.parse_arg(column_names)\n if len(cnames) != len(df.columns):\n raise ValueError('Column names must be of same length as symbols!')\n df.columns = cnames\n elif clean_symbols:\n df.columns = map(utils.clean_symbol, df.columns)\n\n return df\n\n\ndef web(symbol, field=None, start=None, end=None, source='netease'):\n \"\"\"\n web数据源,可选:netease\n \"\"\"\n tmp = None\n if source == 'netease':\n tmp = _get_netease(symbol, start=start, end=end)\n if tmp is None:\n raise ValueError('Failed to retrieve data for %s:%s' % (symbol, field))\n\n if field is not None:\n return tmp[field]\n else:\n return tmp['close']\n\n\ndef _get_netease(symbol, start='', end=''):\n \"\"\"\n 网易财经数据源,获得日线数据\n 示例:http://quotes.money.163.com/service/chddata.html?code=600008&start=20150508&end=20150512\n \"\"\"\n if not start:\n start = (datetime.datetime.now().date() + datetime.timedelta(days=-300)).strftime('%Y-%m-%d')\n if not end:\n end = datetime.datetime.now().date().strftime('%Y-%m-%d')\n start = start.replace('-', '')\n end = end.replace('-', '')\n data_url = \"http://quotes.money.163.com/service/chddata.html?code=0\" + symbol + \"&start=\" + start + \"&end=\" + end\n r = requests.get(data_url, stream=True)\n lines = r.content.decode('gb2312').split(\"\\n\")\n lines = lines[1:len(lines) - 1]\n bars = []\n for line in lines[::-1]:\n stock_info = line.split(\",\", 14)\n s_date = stock_info[0]\n s_close = float(stock_info[3])\n s_high = float(stock_info[4])\n s_low = float(stock_info[5])\n s_open = float(stock_info[6])\n s_volume = float(stock_info[11])\n bars.append([s_date, s_open, s_high, s_low, s_close, s_volume])\n bars = pd.DataFrame(bars, columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])\n bars.index = pd.to_datetime(bars['datetime'], format='%Y-%m-%d')\n\n return bars\n\n\ndef csv(symbol, path='data.csv', field='', **kwargs):\n \"\"\"\n 本地csv数据源\n \"\"\"\n if 'index_col' not in kwargs:\n kwargs['index_col'] = 0\n if 'parse_dates' not in kwargs:\n kwargs['parse_dates'] = True\n\n df = pd.read_csv(path, **kwargs)\n\n syb = symbol\n if field is not '' and field is not None:\n syb = '%s:%s' % (syb, field)\n\n if syb not in df:\n raise ValueError('Symbol(field) not present in csv file!')\n\n return df[syb]\n\n\nDEFAULT_PROVIDER = web\n"
] |
[
[
"pandas.to_datetime",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
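Going by the signatures in `pyfin/data.py` above, a typical call would look roughly like the following sketch; the symbol codes and dates are invented, the module path is assumed from the file path, and the request goes out to the Netease endpoint at runtime:

    import pyfin.data as data

    # '600008' falls back to the 'close' field; '600009:volume' selects a
    # field explicitly via the ':' separator that data.get() parses.
    df = data.get(['600008', '600009:volume'],
                  start='2020-01-01', end='2020-06-30')
    print(df.head())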
sallypannn/pytorch-widedeep
|
[
"0c79deb5de7b5005613fd926dee726f3227f5375"
] |
[
"pytorch_widedeep/models/wide_deep.py"
] |
[
"import warnings\n\nimport torch\nimport torch.nn as nn\n\nfrom pytorch_widedeep.wdtypes import * # noqa: F403\nfrom pytorch_widedeep.models.tab_mlp import MLP\nfrom pytorch_widedeep.models.tabnet.tab_net import TabNetPredLayer\n\nwarnings.filterwarnings(\"default\", category=UserWarning)\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n\nclass WideDeep(nn.Module):\n r\"\"\"Main collector class that combines all ``wide``, ``deeptabular``\n (which can be a number of architectures), ``deeptext`` and\n ``deepimage`` models.\n\n There are two options to combine these models that correspond to the\n two main architectures that ``pytorch-widedeep`` can build.\n\n - Directly connecting the output of the model components to an ouput neuron(s).\n\n - Adding a `Fully-Connected Head` (FC-Head) on top of the deep models.\n This FC-Head will combine the output form the ``deeptabular``, ``deeptext`` and\n ``deepimage`` and will be then connected to the output neuron(s).\n\n Parameters\n ----------\n wide: ``nn.Module``, Optional, default = None\n ``Wide`` model. I recommend using the :obj:`Wide` class in this\n package. However, it is possible to use a custom model as long as\n is consistent with the required architecture, see\n :class:`pytorch_widedeep.models.wide.Wide`\n deeptabular: ``nn.Module``, Optional, default = None\n\n currently ``pytorch-widedeep`` implements four possible\n architectures for the `deeptabular` component. These are:\n TabMlp, TabResnet, TabNet, TabTransformer and SAINT.\n\n 1. TabMlp is simply an embedding layer encoding the categorical\n features that are then concatenated and passed through a series of\n dense (hidden) layers (i.e. and MLP).\n See: :obj:`pytorch_widedeep.models.tab_mlp.TabMlp`\n\n 2. TabResnet is an embedding layer encoding the categorical\n features that are then concatenated and passed through a series of\n ResNet blocks formed by dense layers.\n See :obj:`pytorch_widedeep.models.tab_resnet.TabResnet`\n\n 3. TabNet is detailed in `TabNet: Attentive Interpretable Tabular\n Learning <https://arxiv.org/abs/1908.07442>`_. The TabNet\n implementation in ``pytorch_widedeep`` is an adaptation of the\n `dreamquark-ai <https://github.com/dreamquark-ai/tabnet>`_\n implementation. See\n :obj:`pytorch_widedeep.models.tabnet.tab_net.TabNet`\n\n 3. TabTransformer is detailed in `TabTransformer: Tabular Data\n Modeling Using Contextual Embeddings\n <https://arxiv.org/abs/2012.06678>`_. The TabTransformer\n implementation in ``pytorch-widedeep`` is an adaptation of the\n original implementation. See\n :obj:`pytorch_widedeep.models.transformers.tab_transformer.TabTransformer`.\n\n 3. SAINT is detailed in `SAINT: Improved Neural Networks for Tabular\n Data via Row Attention and Contrastive Pre-Training\n <https://arxiv.org/abs/2106.01342>`_. The SAINT implementation in\n ``pytorch-widedeep`` is an adaptation of the original implementation.\n See\n :obj:`pytorch_widedeep.models.transformers.saint.SAINT`.\n\n I recommend using on of these as ``deeptabular``. However, it is\n possible to use a custom model as long as is consistent with the\n required architecture.\n\n deeptext: ``nn.Module``, Optional, default = None\n Model for the text input. Must be an object of class ``DeepText``\n or a custom model as long as is consistent with the required\n architecture. See\n :class:`pytorch_widedeep.models.deep_text.DeepText`\n deepimage: ``nn.Module``, Optional, default = None\n Model for the images input. 
Must be an object of class\n ``DeepImage`` or a custom model as long as is consistent with the\n required architecture. See\n :class:`pytorch_widedeep.models.deep_image.DeepImage`\n deephead: ``nn.Module``, Optional, default = None\n Custom model by the user that will receive the outtput of the deep\n component. Typically a FC-Head (MLP)\n head_hidden_dims: List, Optional, default = None\n Alternatively, the ``head_hidden_dims`` param can be used to\n specify the sizes of the stacked dense layers in the fc-head e.g:\n ``[128, 64]``. Use ``deephead`` or ``head_hidden_dims``, but not\n both.\n head_dropout: float, default = 0.1\n If ``head_hidden_dims`` is not None, dropout between the layers in\n ``head_hidden_dims``\n head_activation: str, default = \"relu\"\n If ``head_hidden_dims`` is not None, activation function of the\n head layers. One of \"relu\", gelu\" or \"leaky_relu\"\n head_batchnorm: bool, default = False\n If ``head_hidden_dims`` is not None, specifies if batch\n normalizatin should be included in the head layers\n head_batchnorm_last: bool, default = False\n If ``head_hidden_dims`` is not None, boolean indicating whether or\n not to apply batch normalization to the last of the dense layers\n head_linear_first: bool, default = False\n If ``head_hidden_dims`` is not None, boolean indicating whether\n the order of the operations in the dense layer. If ``True``:\n ``[LIN -> ACT -> BN -> DP]``. If ``False``: ``[BN -> DP -> LIN ->\n ACT]``\n pred_dim: int, default = 1\n Size of the final wide and deep output layer containing the\n predictions. `1` for regression and binary classification or number\n of classes for multiclass classification.\n\n Examples\n --------\n\n >>> from pytorch_widedeep.models import TabResnet, DeepImage, DeepText, Wide, WideDeep\n >>> embed_input = [(u, i, j) for u, i, j in zip([\"a\", \"b\", \"c\"][:4], [4] * 3, [8] * 3)]\n >>> column_idx = {k: v for v, k in enumerate([\"a\", \"b\", \"c\"])}\n >>> wide = Wide(10, 1)\n >>> deeptabular = TabResnet(blocks_dims=[8, 4], column_idx=column_idx, embed_input=embed_input)\n >>> deeptext = DeepText(vocab_size=10, embed_dim=4, padding_idx=0)\n >>> deepimage = DeepImage(pretrained=False)\n >>> model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)\n\n\n .. note:: While I recommend using the ``wide`` and ``deeptabular`` components\n within this package when building the corresponding model components,\n it is very likely that the user will want to use custom text and image\n models. That is perfectly possible. Simply, build them and pass them\n as the corresponding parameters. Note that the custom models MUST\n return a last layer of activations (i.e. not the final prediction) so\n that these activations are collected by ``WideDeep`` and combined\n accordingly. In addition, the models MUST also contain an attribute\n ``output_dim`` with the size of these last layers of activations. 
See\n for example :class:`pytorch_widedeep.models.tab_mlp.TabMlp`\n\n \"\"\"\n\n def __init__(\n self,\n wide: Optional[nn.Module] = None,\n deeptabular: Optional[nn.Module] = None,\n deeptext: Optional[nn.Module] = None,\n deepimage: Optional[nn.Module] = None,\n deephead: Optional[nn.Module] = None,\n head_hidden_dims: Optional[List[int]] = None,\n head_activation: str = \"relu\",\n head_dropout: float = 0.1,\n head_batchnorm: bool = False,\n head_batchnorm_last: bool = False,\n head_linear_first: bool = False,\n pred_dim: int = 1,\n ):\n super(WideDeep, self).__init__()\n\n self._check_model_components(\n wide,\n deeptabular,\n deeptext,\n deepimage,\n deephead,\n head_hidden_dims,\n pred_dim,\n )\n\n # required as attribute just in case we pass a deephead\n self.pred_dim = pred_dim\n\n # The main 5 components of the wide and deep assemble\n self.wide = wide\n self.deeptabular = deeptabular\n self.deeptext = deeptext\n self.deepimage = deepimage\n self.deephead = deephead\n\n if self.deeptabular is not None:\n self.is_tabnet = deeptabular.__class__.__name__ == \"TabNet\"\n else:\n self.is_tabnet = False\n\n if self.deephead is None:\n if head_hidden_dims is not None:\n self._build_deephead(\n head_hidden_dims,\n head_activation,\n head_dropout,\n head_batchnorm,\n head_batchnorm_last,\n head_linear_first,\n )\n else:\n self._add_pred_layer()\n\n def forward(self, X: Dict[str, Tensor]):\n wide_out = self._forward_wide(X)\n if self.deephead:\n return self._forward_deephead(X, wide_out)\n else:\n return self._forward_deep(X, wide_out)\n\n def _build_deephead(\n self,\n head_hidden_dims,\n head_activation,\n head_dropout,\n head_batchnorm,\n head_batchnorm_last,\n head_linear_first,\n ):\n deep_dim = 0\n if self.deeptabular is not None:\n deep_dim += self.deeptabular.output_dim\n if self.deeptext is not None:\n deep_dim += self.deeptext.output_dim\n if self.deepimage is not None:\n deep_dim += self.deepimage.output_dim\n\n head_hidden_dims = [deep_dim] + head_hidden_dims\n self.deephead = MLP(\n head_hidden_dims,\n head_activation,\n head_dropout,\n head_batchnorm,\n head_batchnorm_last,\n head_linear_first,\n )\n\n self.deephead.add_module(\n \"head_out\", nn.Linear(head_hidden_dims[-1], self.pred_dim)\n )\n\n def _add_pred_layer(self):\n if self.deeptabular is not None:\n if self.is_tabnet:\n self.deeptabular = nn.Sequential(\n self.deeptabular,\n TabNetPredLayer(self.deeptabular.output_dim, self.pred_dim),\n )\n else:\n self.deeptabular = nn.Sequential(\n self.deeptabular,\n nn.Linear(self.deeptabular.output_dim, self.pred_dim),\n )\n if self.deeptext is not None:\n self.deeptext = nn.Sequential(\n self.deeptext, nn.Linear(self.deeptext.output_dim, self.pred_dim)\n )\n if self.deepimage is not None:\n self.deepimage = nn.Sequential(\n self.deepimage, nn.Linear(self.deepimage.output_dim, self.pred_dim)\n )\n\n def _forward_wide(self, X):\n if self.wide is not None:\n out = self.wide(X[\"wide\"])\n else:\n batch_size = X[list(X.keys())[0]].size(0)\n out = torch.zeros(batch_size, self.pred_dim).to(device)\n\n return out\n\n def _forward_deephead(self, X, wide_out):\n if self.deeptabular is not None:\n if self.is_tabnet:\n tab_out = self.deeptabular(X[\"deeptabular\"])\n deepside, M_loss = tab_out[0], tab_out[1]\n else:\n deepside = self.deeptabular(X[\"deeptabular\"])\n else:\n deepside = torch.FloatTensor().to(device)\n if self.deeptext is not None:\n deepside = torch.cat([deepside, self.deeptext(X[\"deeptext\"])], axis=1)\n if self.deepimage is not None:\n deepside = torch.cat([deepside, 
self.deepimage(X[\"deepimage\"])], axis=1)\n\n deephead_out = self.deephead(deepside)\n deepside_out = nn.Linear(deephead_out.size(1), self.pred_dim).to(device)\n\n if self.is_tabnet:\n res = (wide_out.add_(deepside_out(deephead_out)), M_loss)\n else:\n res = wide_out.add_(deepside_out(deephead_out))\n\n return res\n\n def _forward_deep(self, X, wide_out):\n if self.deeptabular is not None:\n if self.is_tabnet:\n tab_out, M_loss = self.deeptabular(X[\"deeptabular\"])\n wide_out.add_(tab_out)\n else:\n wide_out.add_(self.deeptabular(X[\"deeptabular\"]))\n if self.deeptext is not None:\n wide_out.add_(self.deeptext(X[\"deeptext\"]))\n if self.deepimage is not None:\n wide_out.add_(self.deepimage(X[\"deepimage\"]))\n\n if self.is_tabnet:\n res = (wide_out, M_loss)\n else:\n res = wide_out\n\n return res\n\n @staticmethod # noqa: C901\n def _check_model_components(\n wide,\n deeptabular,\n deeptext,\n deepimage,\n deephead,\n head_hidden_dims,\n pred_dim,\n ):\n\n if wide is not None:\n assert wide.wide_linear.weight.size(1) == pred_dim, (\n \"the 'pred_dim' of the wide component ({}) must be equal to the 'pred_dim' \"\n \"of the deep component and the overall model itself ({})\".format(\n wide.wide_linear.weight.size(1), pred_dim\n )\n )\n if deeptabular is not None and not hasattr(deeptabular, \"output_dim\"):\n raise AttributeError(\n \"deeptabular model must have an 'output_dim' attribute. \"\n \"See pytorch-widedeep.models.deep_text.DeepText\"\n )\n if deeptabular is not None:\n is_tabnet = deeptabular.__class__.__name__ == \"TabNet\"\n has_wide_text_or_image = (\n wide is not None or deeptext is not None or deepimage is not None\n )\n if is_tabnet and has_wide_text_or_image:\n warnings.warn(\n \"'WideDeep' is a model comprised by multiple components and the 'deeptabular'\"\n \" component is 'TabNet'. We recommend using 'TabNet' in isolation.\"\n \" The reasons are: i)'TabNet' uses sparse regularization which partially losses\"\n \" its purpose when used in combination with other components.\"\n \" If you still want to use a multiple component model with 'TabNet',\"\n \" consider setting 'lambda_sparse' to 0 during training. ii) The feature\"\n \" importances will be computed only for TabNet but the model will comprise multiple\"\n \" components. Therefore, such importances will partially lose their 'meaning'.\",\n UserWarning,\n )\n if deeptext is not None and not hasattr(deeptext, \"output_dim\"):\n raise AttributeError(\n \"deeptext model must have an 'output_dim' attribute. \"\n \"See pytorch-widedeep.models.deep_text.DeepText\"\n )\n if deepimage is not None and not hasattr(deepimage, \"output_dim\"):\n raise AttributeError(\n \"deepimage model must have an 'output_dim' attribute. \"\n \"See pytorch-widedeep.models.deep_text.DeepText\"\n )\n if deephead is not None and head_hidden_dims is not None:\n raise ValueError(\n \"both 'deephead' and 'head_hidden_dims' are not None. 
Use one of the other, but not both\"\n )\n if (\n head_hidden_dims is not None\n and not deeptabular\n and not deeptext\n and not deepimage\n ):\n raise ValueError(\n \"if 'head_hidden_dims' is not None, at least one deep component must be used\"\n )\n if deephead is not None:\n deephead_inp_feat = next(deephead.parameters()).size(1)\n output_dim = 0\n if deeptabular is not None:\n output_dim += deeptabular.output_dim\n if deeptext is not None:\n output_dim += deeptext.output_dim\n if deepimage is not None:\n output_dim += deepimage.output_dim\n assert deephead_inp_feat == output_dim, (\n \"if a custom 'deephead' is used its input features ({}) must be equal to \"\n \"the output features of the deep component ({})\".format(\n deephead_inp_feat, output_dim\n )\n )\n"
] |
[
[
"torch.zeros",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.device"
]
] |
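The `WideDeep` forward passes above accumulate each component's logits into the wide output with in-place adds. A toy sketch of that additive pattern using stub linear layers in place of the real submodels (dimensions are arbitrary, chosen only for illustration):

    import torch
    import torch.nn as nn

    pred_dim = 1
    wide = nn.Linear(10, pred_dim)      # stand-in for the Wide component
    deeptab = nn.Linear(16, pred_dim)   # stand-in for deeptabular + its pred layer

    X = {"wide": torch.randn(4, 10), "deeptabular": torch.randn(4, 16)}

    # Mirrors WideDeep._forward_deep: start from the wide logits and
    # accumulate each deep component's logits in place.
    out = wide(X["wide"])
    out.add_(deeptab(X["deeptabular"]))
    print(out.shape)  # torch.Size([4, 1])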
datduong/deepgoplus
|
[
"4e39e9785f196bc2be8fb37dc81a8481b63b1532"
] |
[
"evaluate_naive.py"
] |
[
"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport click as ck\n# from tensorflow.keras.models import Sequential, Model, load_model\n# from tensorflow.keras.layers import (\n# Dense, Dropout, Activation, Input, Reshape,\n# Flatten, BatchNormalization, Embedding,\n# Conv1D, MaxPooling1D, Add, Concatenate)\n# from tensorflow.keras.optimizers import Adam, RMSprop, Adadelta, SGD\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics.pairwise import cosine_similarity\n# from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nimport sys\nfrom collections import deque, Counter\nimport time\nimport logging\n# import tensorflow as tf\nfrom sklearn.metrics import roc_curve, auc, matthews_corrcoef\nfrom scipy.spatial import distance\nfrom scipy import sparse\nimport math\nfrom utils import FUNC_DICT, Ontology, NAMESPACES\nfrom matplotlib import pyplot as plt\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\n\[email protected]()\[email protected](\n '--train-data-file', '-trdf', default='data-cafa3/train_data.pkl',\n help='Data file with training features')\[email protected](\n '--test-data-file', '-tsdf', default='data-cafa3/test_data.pkl',\n help='Test data')\[email protected](\n '--ont', '-o', default='mf',\n help='GO subontology (bp, mf, cc)')\n\ndef main(train_data_file, test_data_file, ont):\n\n go_rels = Ontology('data/go.obo', with_rels=True)\n # terms_df = pd.read_pickle('data-deepgo/' + ont + '.pkl')\n # terms = terms_df['functions'].values.flatten()\n # terms_dict = {v: i for i, v in enumerate(terms)}\n\n train_df = pd.read_pickle(train_data_file)\n annotations = train_df['annotations'].values\n annotations = list(map(lambda x: set(x), annotations))\n\n test_df = pd.read_pickle(test_data_file)\n test_annotations = test_df['annotations'].values\n test_annotations = list(map(lambda x: set(x), test_annotations))\n\n go_rels.calculate_ic(annotations + test_annotations)\n\n go_set = go_rels.get_namespace_terms(NAMESPACES[ont])\n go_set.remove(FUNC_DICT[ont])\n\n annotations = list(map(lambda x: set(filter(lambda y: y in go_set, x)), annotations))\n\n cnt = Counter()\n max_n = 0\n for x in annotations:\n cnt.update(x)\n print(cnt.most_common(10))\n max_n = cnt.most_common(1)[0][1]\n print(max_n)\n scores = {}\n for go_id, n in cnt.items():\n score = n / max_n\n scores[go_id] = score #! 
IC score?\n\n prot_index = {}\n for i, row in enumerate(train_df.itertuples()):\n prot_index[row.proteins] = i\n\n\n labels = test_annotations\n labels = list(map(lambda x: set(filter(lambda y: y in go_set, x)), labels))\n print(len(go_set))\n fmax = 0.0\n tmax = 0.0\n smin = 1000.0\n precisions = []\n recalls = []\n for threshold in np.arange(0.005,.5,.01): # \n # threshold = t / 100.0\n preds = []\n annots = set()\n for go_id, score in scores.items():\n if score >= threshold:\n annots.add(go_id)\n # new_annots = set()\n # for go_id in annots:\n # new_annots |= go_rels.get_anchestors(go_id)\n # new_annots = set(filter(lambda y: y in go_set, new_annots))\n for i, row in enumerate(test_df.itertuples()):\n preds.append(annots.copy())\n\n fscore, prec, rec, s = evaluate_annotations(go_rels, labels, preds)\n precisions.append(prec)\n recalls.append(rec)\n print(f'Fscore: {fscore}, S: {s}, threshold: {threshold}')\n if fmax < fscore:\n fmax = fscore\n tmax = threshold\n if smin > s:\n smin = s\n print(f'Fmax: {fmax:0.3f}, Smin: {smin:0.3f}, threshold: {tmax}')\n precisions = np.array(precisions)\n recalls = np.array(recalls)\n sorted_index = np.argsort(recalls)\n recalls = recalls[sorted_index]\n precisions = precisions[sorted_index]\n aupr = np.trapz(precisions, recalls)\n print(f'AUPR: {aupr:0.3f}')\n plt.figure()\n lw = 2\n plt.plot(recalls, precisions, color='darkorange',\n lw=lw, label=f'AUPR curve (area = {aupr:0.3f})')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title('Area Under the Precision-Recall curve')\n plt.legend(loc=\"lower right\")\n plt.savefig('aupr.pdf')\n plt.show()\n\n\ndef compute_roc(labels, preds):\n # Compute ROC curve and ROC area for each class\n fpr, tpr, _ = roc_curve(labels.flatten(), preds.flatten())\n roc_auc = auc(fpr, tpr)\n return roc_auc\n\ndef compute_mcc(labels, preds):\n # Compute ROC curve and ROC area for each class\n mcc = matthews_corrcoef(labels.flatten(), preds.flatten())\n return mcc\n\ndef evaluate_annotations(go, real_annots, pred_annots):\n total = 0\n p = 0.0\n r = 0.0\n p_total= 0\n ru = 0.0\n mi = 0.0\n for i in range(len(real_annots)):\n if len(real_annots[i]) == 0:\n continue\n tp = real_annots[i].intersection(pred_annots[i])\n fp = pred_annots[i] - tp\n fn = real_annots[i] - tp\n for go_id in fp:\n mi += go.get_ic(go_id)\n for go_id in fn:\n ru += go.get_ic(go_id)\n tpn = len(tp)\n fpn = len(fp)\n fnn = len(fn)\n total += 1\n recall = tpn / (1.0 * (tpn + fnn))\n r += recall\n if len(pred_annots[i]) > 0:\n p_total += 1\n precision = tpn / (1.0 * (tpn + fpn))\n p += precision\n ru /= total\n mi /= total\n r /= total\n if p_total > 0:\n p /= p_total\n f = 0.0\n if p + r > 0:\n f = 2 * p * r / (p + r)\n s = math.sqrt(ru * ru + mi * mi)\n print ('total protein count is {}, total with valid prediction {}'.format(total,p_total))\n return f, p, r, s\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_pickle",
"matplotlib.pyplot.title",
"sklearn.metrics.auc",
"numpy.arange",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.argsort",
"numpy.array",
"numpy.trapz",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
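`evaluate_naive.py` above sweeps a score threshold, collects a precision/recall pair at each step, sorts the pairs by recall, and integrates AUPR with `np.trapz`. A compact sketch of just that bookkeeping, on fabricated precision/recall values:

    import numpy as np

    # Fabricated (recall, precision) points from a threshold sweep.
    recalls = np.array([0.9, 0.2, 0.6, 0.4])
    precisions = np.array([0.3, 0.8, 0.5, 0.65])

    # np.trapz assumes the x values are ordered, so sort by recall first,
    # exactly as the script does before integrating.
    order = np.argsort(recalls)
    aupr = np.trapz(precisions[order], recalls[order])
    print(f'AUPR: {aupr:0.3f}')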
bm424/texpy
|
[
"8d78b568209a6da36fc831c6bc9e2b0cb4c740c8"
] |
[
"orix/io/plugins/orix_hdf5.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright 2018-2022 the orix developers\n#\n# This file is part of orix.\n#\n# orix is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# orix is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with orix. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Reader and writer of a crystal map to and from orix' own HDF5 file\nformat.\n\"\"\"\n\n\nimport copy\nfrom warnings import warn\n\nfrom diffpy.structure import Atom, Lattice, Structure\nfrom h5py import File\nimport numpy as np\n\nfrom orix.crystal_map import CrystalMap, Phase, PhaseList\nfrom orix.io.plugins._h5ebsd import hdf5group2dict\nfrom orix.quaternion import Rotation\n\n__all__ = [\"file_reader\", \"file_writer\"]\n\n# Plugin description\nformat_name = \"orix_hdf5\"\nmanufacturer = \"orix\"\nfile_extensions = [\"h5\", \"hdf5\"]\nwrites = True\nwrites_this = CrystalMap\n# TODO: Extend reader/writer to Phase and PhaseList objects\n\n\ndef file_reader(filename, **kwargs):\n \"\"\"Return a :class:`~orix.crystal_map.crystal_map.CrystalMap` object\n from a file in orix' HDF5 file format.\n\n Parameters\n ----------\n filename : str\n Path and file name.\n kwargs\n Keyword arguments passed to :func:`h5py.File`.\n\n Returns\n -------\n CrystalMap\n \"\"\"\n mode = kwargs.pop(\"mode\", \"r\")\n with File(filename, mode=mode, **kwargs) as f:\n file_dict = hdf5group2dict(f[\"/\"], recursive=True)\n return dict2crystalmap(file_dict[\"crystal_map\"])\n\n\ndef dict2crystalmap(dictionary):\n \"\"\"Get a crystal map from necessary items in a dictionary.\n\n Parameters\n ----------\n dictionary : dict\n Dictionary with crystal map information.\n\n Returns\n -------\n CrystalMap\n \"\"\"\n dictionary = copy.deepcopy(dictionary)\n\n data = dictionary[\"data\"]\n header = dictionary[\"header\"]\n\n # New dictionary with CrystalMap initialization arguments as keys\n crystal_map_dict = {\n # Use dstack and squeeze to allow more rotations per data point\n \"rotations\": Rotation.from_euler(\n np.dstack((data.pop(\"phi1\"), data.pop(\"Phi\"), data.pop(\"phi2\"))).squeeze(),\n ),\n \"scan_unit\": header[\"scan_unit\"],\n \"phase_list\": dict2phaselist(header[\"phases\"]),\n \"phase_id\": data.pop(\"phase_id\"),\n \"is_in_data\": data.pop(\"is_in_data\"),\n }\n # Add standard items by updating the new dictionary\n for direction in [\"z\", \"y\", \"x\"]:\n this_direction = data.pop(direction)\n if hasattr(this_direction, \"__iter__\") is False:\n this_direction = None\n crystal_map_dict[direction] = this_direction\n _ = [data.pop(i) for i in [\"id\"]]\n # What's left should be properties like quality metrics etc.\n crystal_map_dict.update({\"prop\": data})\n\n return CrystalMap(**crystal_map_dict)\n\n\ndef dict2phaselist(dictionary):\n \"\"\"Get a :class:`~orix.crystal_map.phase_list.PhaseList` object from a\n dictionary.\n\n Parameters\n ----------\n dictionary : dict\n Dictionary with phase list information.\n\n Returns\n -------\n PhaseList\n \"\"\"\n dictionary = copy.deepcopy(dictionary)\n return PhaseList(phases={int(k): dict2phase(v) for k, v in dictionary.items()})\n\n\ndef 
dict2phase(dictionary):\n \"\"\"Get a :class:`~orix.crystal_map.phase_list.Phase` object from a\n dictionary.\n\n Parameters\n ----------\n dictionary : dict\n Dictionary with phase information.\n\n Returns\n -------\n Phase\n \"\"\"\n dictionary = copy.deepcopy(dictionary)\n structure = dict2structure(dictionary[\"structure\"])\n structure.title = dictionary[\"name\"]\n # TODO: Remove this check in v0.6.0, since space_group was introduced in v0.4.0\n try:\n space_group = dictionary[\"space_group\"] # Either \"None\" or int\n except KeyError: # v0.3.0\n space_group = \"None\"\n if space_group == \"None\":\n space_group = None\n else:\n space_group = int(space_group)\n # TODO: Remove this check in v0.6.0, since name change was introduced in v0.4.0\n try:\n point_group = dictionary[\"point_group\"]\n except KeyError: # v0.3.0\n point_group = dictionary[\"symmetry\"]\n if point_group == \"None\":\n point_group = None\n return Phase(\n name=dictionary[\"name\"],\n space_group=space_group,\n point_group=point_group,\n structure=structure,\n color=dictionary[\"color\"],\n )\n\n\ndef dict2structure(dictionary):\n \"\"\"Get a :class:`diffpy.structure.Structure` object from a dictionary.\n\n Parameters\n ----------\n dictionary : dict\n Dictionary with structure information.\n\n Returns\n -------\n Structure\n \"\"\"\n dictionary = copy.deepcopy(dictionary)\n return Structure(\n lattice=dict2lattice(dictionary[\"lattice\"]),\n atoms=[dict2atom(atom) for atom in dictionary[\"atoms\"].values()],\n )\n\n\ndef dict2lattice(dictionary):\n \"\"\"Get a :class:`diffpy.structure.Lattice` object from a dictionary.\n\n Parameters\n ----------\n dictionary : dict\n Dictionary with lattice information.\n\n Returns\n -------\n Lattice\n \"\"\"\n dictionary = copy.deepcopy(dictionary)\n lattice_dict = {\n k: v\n for k, v in zip([\"a\", \"b\", \"c\", \"alpha\", \"beta\", \"gamma\"], dictionary[\"abcABG\"])\n }\n lattice_dict[\"baserot\"] = dictionary[\"baserot\"]\n return Lattice(**lattice_dict)\n\n\ndef dict2atom(dictionary):\n \"\"\"Get a :class:`diffpy.structure.Atom.atom` object from a dictionary.\n\n Parameters\n ----------\n dictionary : dict\n Dictionary with atom information.\n\n Returns\n -------\n Atom\n \"\"\"\n dictionary = copy.deepcopy(dictionary)\n atom_dict = {\"atype\": dictionary.pop(\"element\")}\n atom_dict.update(dictionary)\n return Atom(**atom_dict)\n\n\ndef file_writer(filename, crystal_map, **kwargs):\n \"\"\"Write a :class:`~orix.crystal_map.crystal_map.CrystalMap` object to\n an HDF5 file.\n\n Parameters\n ----------\n filename : str\n Name of file to write to.\n crystal_map : CrystalMap\n Object to write to file.\n kwargs\n Keyword arguments passed to :meth:`h5py:Group.require_dataset`.\n \"\"\"\n # Open file in correct mode\n try:\n f = File(filename, mode=\"w\")\n except OSError:\n raise OSError(f\"Cannot write to the already open file '{filename}'.\")\n\n from orix import __version__\n\n file_dict = {\n \"manufacturer\": \"orix\",\n \"version\": __version__,\n \"crystal_map\": crystalmap2dict(crystal_map),\n }\n dict2hdf5group(file_dict, f[\"/\"], **kwargs)\n\n f.close()\n\n\ndef crystalmap2dict(crystal_map, dictionary=None):\n \"\"\"Get a dictionary from a\n :class:`~orix.crystal_map.crystal_map.CrystalMap` object with `data`\n and `header` keys with values.\n\n Parameters\n ----------\n crystal_map : CrystalMap\n Crystal map.\n dictionary : dict, optional\n Dictionary to update with crystal map information. 
If None\n (default), a new dictionary is created.\n\n Returns\n -------\n dictionary : dict\n Dictionary with crystal map information.\n \"\"\"\n if dictionary is None:\n dictionary = {}\n\n # Get data cube coordinates in step size\n z, y, x = [\n 0 if i is None else i for i in [crystal_map._z, crystal_map._y, crystal_map._x]\n ]\n # Get euler angles phi1, Phi, phi2\n eulers = crystal_map._rotations.to_euler()\n dictionary.update(\n {\n \"data\": {\n \"z\": z,\n \"y\": y,\n \"x\": x,\n \"phi1\": eulers[..., 0],\n \"Phi\": eulers[..., 1],\n \"phi2\": eulers[..., 2],\n \"phase_id\": crystal_map._phase_id,\n \"id\": crystal_map._id,\n \"is_in_data\": crystal_map.is_in_data,\n },\n \"header\": {\n \"grid_type\": \"square\",\n \"nz\": z.size if isinstance(z, np.ndarray) else 1,\n \"ny\": y.size if isinstance(y, np.ndarray) else 1,\n \"nx\": x.size if isinstance(x, np.ndarray) else 1,\n \"z_step\": crystal_map.dz,\n \"y_step\": crystal_map.dy,\n \"x_step\": crystal_map.dx,\n \"rotations_per_point\": crystal_map.rotations_per_point,\n \"scan_unit\": crystal_map.scan_unit,\n },\n }\n )\n dictionary[\"data\"].update(crystal_map.prop)\n dictionary[\"header\"].update({\"phases\": phaselist2dict(crystal_map.phases)})\n\n return dictionary\n\n\ndef dict2hdf5group(dictionary, group, **kwargs):\n \"\"\"Write a dictionary to datasets in a new group in an opened HDF5\n file.\n\n Parameters\n ----------\n dictionary : dict\n Dataset names as keys with datasets as values.\n group : h5py:Group\n HDF5 group to write dictionary to.\n kwargs\n Keyword arguments passed to :meth:`h5py:Group.require_dataset`.\n \"\"\"\n for key, val in dictionary.items():\n ddtype = type(val)\n dshape = (1,)\n if isinstance(val, dict):\n dict2hdf5group(val, group.create_group(key), **kwargs)\n continue # Jump to next item in dictionary\n elif isinstance(val, str):\n ddtype = \"S\" + str(len(val) + 1)\n val = val.encode()\n elif ddtype == np.dtype(\"O\"):\n try:\n if isinstance(val, np.ndarray):\n ddtype = val.dtype\n else:\n ddtype = val[0].dtype\n dshape = np.shape(val)\n except TypeError:\n warn(\n \"The orix HDF5 writer could not write the following information to \"\n f\"the file '{key} : {val}'.\"\n )\n break\n group.create_dataset(key, shape=dshape, dtype=ddtype, **kwargs)\n group[key][()] = val\n\n\ndef phaselist2dict(phases, dictionary=None):\n \"\"\"Get a dictionary of phases.\n\n Parameters\n ----------\n phases : PhaseList\n Phases to write to file.\n dictionary : dict, optional\n Dictionary to update with information from multiple phases. If\n None (default), a new dictionary is created.\n\n Returns\n -------\n dictionary : dict\n Dictionary with information from multiple phases.\n \"\"\"\n if dictionary is None:\n dictionary = {}\n dictionary.update({str(i): phase2dict(p) for i, p in phases})\n return dictionary\n\n\ndef phase2dict(phase, dictionary=None):\n \"\"\"Get a dictionary of a phase.\n\n Parameters\n __________\n phase : Phase\n Phase to write to file.\n dictionary : dict, optional\n Dictionary to update with information from a single phase. 
If None\n (default), a new dictionary is created.\n\n Returns\n -------\n dictionary : dict\n Dictionary with information from a single phase.\n \"\"\"\n if dictionary is None:\n dictionary = {}\n\n dictionary[\"name\"] = phase.name\n if hasattr(phase.space_group, \"number\"):\n space_group = phase.space_group.number\n else:\n space_group = \"None\"\n if hasattr(phase.point_group, \"name\"):\n point_group = phase.point_group.name\n else:\n point_group = \"None\"\n dictionary[\"space_group\"] = space_group\n dictionary[\"point_group\"] = point_group\n dictionary[\"color\"] = phase.color\n dictionary[\"structure\"] = structure2dict(phase.structure)\n\n return dictionary\n\n\ndef structure2dict(structure, dictionary=None):\n \"\"\"Get a dictionary of a phase's\n :class:`diffpy.structure.Structure` content.\n\n Only values necessary to initialize a structure object are returned.\n\n Parameters\n ----------\n structure : diffpy.structure.Structure\n Phase structure with a lattice and atoms.\n dictionary : dict, optional\n Dictionary to update with structure information. If None\n (default), a new dictionary is created.\n\n Returns\n -------\n dictionary : dict\n Dictionary with structure information.\n \"\"\"\n if dictionary is None:\n dictionary = {}\n dictionary[\"lattice\"] = lattice2dict(structure.lattice)\n atoms = structure.tolist()\n dictionary[\"atoms\"] = {str(i): atom2dict(atom) for i, atom in enumerate(atoms)}\n return dictionary\n\n\ndef lattice2dict(lattice, dictionary=None):\n \"\"\"Get a dictionary of a structure's\n :class:`diffpy.structure.Structure.lattice` content.\n\n Only values necessary to initialize a lattice object are returned.\n\n Parameters\n ----------\n lattice : diffpy.structure.Structure.lattice\n Structure lattice.\n dictionary : dict, optional\n Dictionary to update with structure lattice information. If None\n (default), a new dictionary is created.\n\n Returns\n -------\n dictionary : dict\n Dictionary with structure lattice information.\n \"\"\"\n if dictionary is None:\n dictionary = {}\n dictionary[\"abcABG\"] = np.array(lattice.abcABG())\n dictionary[\"baserot\"] = lattice.baserot\n return dictionary\n\n\ndef atom2dict(atom, dictionary=None):\n \"\"\"Get a dictionary of one of a structure's\n :class:`diffpy.structure.Structure.atoms` content.\n\n Only values necessary to initialize an atom object are returned.\n\n Parameters\n ----------\n atom : diffpy.structure.Structure.atom\n Atom in a structure.\n dictionary : dict, optional\n Dictionary to update with structure atom information. If None\n (default), a new dictionary is created.\n\n Returns\n -------\n dictionary : dict\n Dictionary with structure atoms information.\n \"\"\"\n if dictionary is None:\n dictionary = {}\n dictionary.update(\n {\n attribute: atom.__getattribute__(attribute)\n for attribute in [\"element\", \"label\", \"occupancy\", \"xyz\", \"U\"]\n }\n )\n return dictionary\n"
] |
[
[
"numpy.shape",
"numpy.dtype"
]
] |
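`dict2hdf5group` above maps nested dictionaries onto HDF5 groups and datasets. A minimal standalone round trip with `h5py` showing the same idea (the file name and payload are invented for the demo):

    import h5py
    import numpy as np

    def write_dict(group, dictionary):
        # One subgroup per nested dict, one dataset per array/scalar/string value.
        for key, val in dictionary.items():
            if isinstance(val, dict):
                write_dict(group.create_group(key), val)
            else:
                group[key] = val

    payload = {"header": {"nx": 3, "scan_unit": "um"},
               "data": {"x": np.arange(3.0)}}

    with h5py.File("demo.h5", "w") as f:
        write_dict(f, payload)

    with h5py.File("demo.h5", "r") as f:
        print(f["data/x"][()], f["header/scan_unit"][()])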
schrma/oloid
|
[
"49fc62cd83e9839620292139cc55e5ec848b386c"
] |
[
"src/oloid/circle.py"
] |
[
"import numpy as np\nfrom matplotlib.lines import Line2D\nfrom mpl_toolkits.mplot3d import proj3d\nfrom mpl_toolkits.mplot3d.art3d import Line3D\n\nimport geometric_functions\n\nclass circle(Line3D):\n def __init__(self,center_point, normal_vector, radius=1,nr_of_points=100):\n self.center_point = np.array(center_point)\n self.normal_vector = np.array(normal_vector)\n self.radius = radius\n self.nr_of_points = nr_of_points\n Line3D.__init__(self, [], [], [], label='parametric curve')\n\n\n def draw(self, renderer):\n x,y,z = self.calculate()\n xs3d, ys3d, zs3d = x,y,z\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_data_3d(x, y,z)\n Line3D.draw(self, renderer)\n\n def get_x(self):\n x,y,z = self.calculate()\n return x\n\n def get_y(self):\n x,y,z = self.calculate()\n return y\n\n def get_z(self):\n x,y,z = self.calculate()\n return z\n\n def calculate(self):\n theta = np.linspace(0, 2 * np.pi, self.nr_of_points)\n center_vector = self.center_point\n\n u_vector = center_vector + geometric_functions.get_orthogonal_vector(self.normal_vector)\n u_vector_norm = u_vector / np.linalg.norm(u_vector)\n\n v_vector = center_vector + np.cross(u_vector_norm, self.normal_vector)\n v_vector_norm = v_vector / np.linalg.norm(v_vector)\n\n x = self.center_point[0] + v_vector_norm[0] * self.radius * np.cos(theta) + u_vector_norm[0] * self.radius * np.sin(theta)\n y = self.center_point[1] + v_vector_norm[1] * self.radius * np.cos(theta) + u_vector_norm[1] * self.radius * np.sin(theta)\n z = self.center_point[2] + v_vector_norm[2] * self.radius * np.cos(theta) + u_vector_norm[2] * self.radius * np.sin(theta)\n return x,y,z\n"
] |
[
[
"numpy.linspace",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.cross",
"numpy.array"
]
] |
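The corrected `calculate` above parameterizes the circle as c + r(cos θ · v + sin θ · u), with (u, v) an orthonormal basis of the plane perpendicular to `normal_vector`. A NumPy-only check of that construction, with the orthogonal-vector helper inlined instead of imported from `geometric_functions`:

    import numpy as np

    def circle_points(center, normal, radius=1.0, n=100):
        center = np.asarray(center, dtype=float)
        normal = np.asarray(normal, dtype=float)
        normal = normal / np.linalg.norm(normal)
        # Any vector not parallel to `normal` seeds the first basis vector.
        seed = np.array([1.0, 0.0, 0.0])
        if abs(normal[0]) > 0.9:
            seed = np.array([0.0, 1.0, 0.0])
        u = np.cross(normal, seed)
        u /= np.linalg.norm(u)
        v = np.cross(normal, u)  # already unit length since normal and u are
        theta = np.linspace(0, 2 * np.pi, n)
        return center + radius * (np.outer(np.cos(theta), u)
                                  + np.outer(np.sin(theta), v))

    pts = circle_points([1, 2, 3], [0, 0, 1])
    # All points lie at distance `radius` from the center.
    print(np.allclose(np.linalg.norm(pts - [1, 2, 3], axis=1), 1.0))  # True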
Morgan-Gan/Slowfast-fb
|
[
"f7387d5f30d609f6d0e022a81c51729b647dae97"
] |
[
"tools/test_net.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n\"\"\"Multi-view test a video classification model.\"\"\"\n\nimport numpy as np\nimport os\nimport pickle\nimport torch\nfrom iopath.common.file_io import g_pathmgr\n\nimport slowfast.utils.checkpoint as cu\nimport slowfast.utils.distributed as du\nimport slowfast.utils.loggings as logging\nimport slowfast.utils.misc as misc\nimport slowfast.visualization.tensorboard_vis as tb\nfrom slowfast.datasets import loader\nfrom slowfast.models import build_model\nfrom slowfast.utils.meters import AVAMeter, TestMeter\n\nlogger = logging.get_logger(__name__)\n\n\[email protected]_grad()\ndef perform_test(test_loader, model, test_meter, cfg, writer=None):\n \"\"\"\n For classification:\n Perform mutli-view testing that uniformly samples N clips from a video along\n its temporal axis. For each clip, it takes 3 crops to cover the spatial\n dimension, followed by averaging the softmax scores across all Nx3 views to\n form a video-level prediction. All video predictions are compared to\n ground-truth labels and the final testing performance is logged.\n For detection:\n Perform fully-convolutional testing on the full frames without crop.\n Args:\n test_loader (loader): video testing loader.\n model (model): the pretrained video model to test.\n test_meter (TestMeter): testing meters to log and ensemble the testing\n results.\n cfg (CfgNode): configs. Details can be found in\n slowfast/config/defaults.py\n writer (TensorboardWriter object, optional): TensorboardWriter object\n to writer Tensorboard log.\n \"\"\"\n # Enable eval mode.\n model.eval()\n test_meter.iter_tic()\n\n for cur_iter, (inputs, labels, video_idx, meta) in enumerate(test_loader):\n if cfg.NUM_GPUS:\n # Transfer the data to the current GPU device.\n if isinstance(inputs, (list,)):\n for i in range(len(inputs)):\n inputs[i] = inputs[i].cuda(non_blocking=True)\n else:\n inputs = inputs.cuda(non_blocking=True)\n\n # Transfer the data to the current GPU device.\n labels = labels.cuda()\n video_idx = video_idx.cuda()\n for key, val in meta.items():\n if isinstance(val, (list,)):\n for i in range(len(val)):\n val[i] = val[i].cuda(non_blocking=True)\n else:\n meta[key] = val.cuda(non_blocking=True)\n test_meter.data_toc()\n\n if cfg.DETECTION.ENABLE:\n # Compute the predictions.\n preds = model(inputs, meta[\"boxes\"])\n ori_boxes = meta[\"ori_boxes\"]\n metadata = meta[\"metadata\"]\n\n preds = preds.detach().cpu() if cfg.NUM_GPUS else preds.detach()\n ori_boxes = (\n ori_boxes.detach().cpu() if cfg.NUM_GPUS else ori_boxes.detach()\n )\n metadata = (\n metadata.detach().cpu() if cfg.NUM_GPUS else metadata.detach()\n )\n\n if cfg.NUM_GPUS > 1:\n preds = torch.cat(du.all_gather_unaligned(preds), dim=0)\n ori_boxes = torch.cat(du.all_gather_unaligned(ori_boxes), dim=0)\n metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)\n\n test_meter.iter_toc()\n # Update and log stats.\n test_meter.update_stats(preds, ori_boxes, metadata)\n test_meter.log_iter_stats(None, cur_iter)\n else:\n # Perform the forward pass.\n preds = model(inputs)\n\n # Gather all the predictions across all the devices to perform ensemble.\n if cfg.NUM_GPUS > 1:\n preds, labels, video_idx = du.all_gather(\n [preds, labels, video_idx]\n )\n if cfg.NUM_GPUS:\n preds = preds.cpu()\n labels = labels.cpu()\n video_idx = video_idx.cpu()\n\n test_meter.iter_toc()\n # Update and log stats.\n test_meter.update_stats(\n preds.detach(), labels.detach(), video_idx.detach()\n )\n 
test_meter.log_iter_stats(cur_iter)\n\n test_meter.iter_tic()\n\n # Log epoch stats and print the final testing results.\n if not cfg.DETECTION.ENABLE:\n all_preds = test_meter.video_preds.clone().detach()\n all_labels = test_meter.video_labels\n if cfg.NUM_GPUS:\n all_preds = all_preds.cpu()\n all_labels = all_labels.cpu()\n if writer is not None:\n writer.plot_eval(preds=all_preds, labels=all_labels)\n\n if cfg.TEST.SAVE_RESULTS_PATH != \"\":\n save_path = os.path.join(cfg.OUTPUT_DIR, cfg.TEST.SAVE_RESULTS_PATH)\n\n if du.is_root_proc():\n with g_pathmgr.open(save_path, \"wb\") as f:\n pickle.dump([all_preds, all_labels], f)\n\n logger.info(\n \"Successfully saved prediction results to {}\".format(save_path)\n )\n\n test_meter.finalize_metrics()\n return test_meter\n\n\ndef test(cfg):\n \"\"\"\n Perform multi-view testing on the pretrained video model.\n Args:\n cfg (CfgNode): configs. Details can be found in\n slowfast/config/defaults.py\n \"\"\"\n # Set up environment.\n du.init_distributed_training(cfg)\n # Set random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n\n # Setup logging format.\n logging.setup_logging(cfg.OUTPUT_DIR)\n\n # Print config.\n logger.info(\"Test with config:\")\n logger.info(cfg)\n\n # Build the video model and print model statistics.\n model = build_model(cfg)\n if du.is_master_proc() and cfg.LOG_MODEL_INFO:\n misc.log_model_info(model, cfg, use_train_input=False)\n\n cu.load_test_checkpoint(cfg, model)\n\n # Create video testing loaders.\n test_loader = loader.construct_loader(cfg, \"test\")\n logger.info(\"Testing model for {} iterations\".format(len(test_loader)))\n\n if cfg.DETECTION.ENABLE:\n assert cfg.NUM_GPUS == cfg.TEST.BATCH_SIZE or cfg.NUM_GPUS == 0\n test_meter = AVAMeter(len(test_loader), cfg, mode=\"test\")\n else:\n assert (\n test_loader.dataset.num_videos\n % (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS)\n == 0\n )\n # Create meters for multi-view testing.\n test_meter = TestMeter(\n test_loader.dataset.num_videos\n // (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS),\n cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS,\n cfg.MODEL.NUM_CLASSES,\n len(test_loader),\n cfg.DATA.MULTI_LABEL,\n cfg.DATA.ENSEMBLE_METHOD,\n )\n\n # Set up writer for logging to Tensorboard format.\n if cfg.TENSORBOARD.ENABLE and du.is_master_proc(\n cfg.NUM_GPUS * cfg.NUM_SHARDS\n ):\n writer = tb.TensorboardWriter(cfg)\n else:\n writer = None\n\n # # Perform multi-view test on the entire dataset.\n test_meter = perform_test(test_loader, model, test_meter, cfg, writer)\n if writer is not None:\n writer.close()\n"
] |
[
[
"torch.manual_seed",
"torch.no_grad",
"numpy.random.seed"
]
] |
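A minimal, self-contained sketch (not SlowFast code) of how the three APIs indexed above combine in the evaluation pattern this record's docstring describes: seed NumPy and torch for reproducibility, then average per-view softmax scores under torch.no_grad(). The `model` and `clips` objects here are hypothetical stand-ins, not SlowFast objects.

import numpy as np
import torch

np.random.seed(0)     # as test() does with np.random.seed(cfg.RNG_SEED)
torch.manual_seed(0)  # as test() does with torch.manual_seed(cfg.RNG_SEED)

model = torch.nn.Linear(16, 10)  # hypothetical stand-in for a video model
model.eval()
clips = [torch.randn(1, 16) for _ in range(3)]  # stand-in for the Nx3 views of one video

with torch.no_grad():  # inference only, as the @torch.no_grad() decorator enforces
    # Average the per-view softmax scores into a single video-level prediction.
    scores = torch.stack([model(c).softmax(dim=-1) for c in clips]).mean(dim=0)
video_level_class = scores.argmax(dim=-1)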
jtschindler/sculptor
|
[
"67b7ebfb05ee8ec9d00399c6d80c238d2104eebc"
] |
[
"sculptor/specfitgui.py"
] |
[
"\nimport numpy as np\n\nfrom PyQt5 import QtWidgets, QtCore\n\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QAction, \\\n QLabel, QTabWidget, QVBoxLayout, QHBoxLayout, QFileDialog, QLineEdit, \\\n QComboBox, QCheckBox, QGroupBox, QScrollArea\n\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT \\\n as NavigationToolbar\n\nfrom sculptor.specfit import SpecFit\nfrom sculptor.specmodel import fitting_methods\nfrom sculptor.masksmodels import mask_presets\nfrom sculptor.specmodelwidget import SpecModelWidget\nfrom sculptor.specfitcanvas import SpecFitCanvas\nfrom sculptor.menu_dialogs import ResampleWindow, EmceeWindow, NormalizeWindow\nfrom sculptor.menu_dialogs import ResampleWindow, EmceeWindow, NormalizeWindow\n\ndef update_float_variable_from_linedit(lineEdit, variable,\n expression='{:.2f}'):\n\n try:\n new_value = float(lineEdit.text())\n lineEdit.setText(expression.format(new_value))\n return new_value\n except:\n print('[INFO] Input value is not convertable to float.')\n return variable\n\n\n\nclass SpecFitGui(QMainWindow):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, spectrum=None, redshift=0):\n\n QMainWindow.__init__(self)\n\n self.resize(1150, 824)\n self.setWindowTitle(\"Sculptor - Interactive Modelling of astronomic electromagnetic spectra\")\n\n # Add the SpecFit class to the GUI\n self.specfit = SpecFit(spectrum=spectrum, redshift=redshift)\n\n # Initialize class variables\n self.x_pos_a = 0\n self.x_pos_b = 0\n self.y_pos_a = 0\n self.y_pos_b = 0\n\n # Random seed variable\n\n\n # Resampling variables\n self.nsamples = 100\n self.resample_seed = 1234\n self.save_result_plots = True\n self.resample_foldername = '.'\n\n # MCMC variables\n\n\n # Setup the main menu\n # Add exit action\n exitAction = QAction('&Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n exitAction.triggered.connect(self.close)\n # Add save action\n saveAction = QAction('&Save', self)\n saveAction.setShortcut('Ctrl+S')\n saveAction.setStatusTip('Save to SpecFit Folder')\n saveAction.triggered.connect(self.save)\n # Add load action\n loadAction = QAction('&Load', self)\n loadAction.setShortcut('Ctrl+L')\n loadAction.setStatusTip('Load from SpecFit Folder')\n loadAction.triggered.connect(self.load)\n\n # Spectrum actions\n # Add import spectrum action\n importIrafSpecAction = QAction('Import IRAF spectrum', self)\n # loadAction.setShortcut('Ctrl+L')\n importIrafSpecAction.setStatusTip('Import IRAF Spectrum. Overrides '\n 'current spectrum')\n importIrafSpecAction.triggered.connect(lambda: self.import_spectrum(\n mode='IRAF'))\n\n importPypeitSpecAction = QAction('Import PypeIt spectrum', self)\n # loadAction.setShortcut('Ctrl+L')\n importPypeitSpecAction.setStatusTip('Import PypeIt Spectrum. Overrides '\n 'current spectrum')\n importPypeitSpecAction.triggered.connect(lambda: self.import_spectrum(\n mode='PypeIt'))\n\n importSodSpecAction = QAction('Import SpecOneD spectrum', self)\n # loadAction.setShortcut('Ctrl+L')\n importSodSpecAction.setStatusTip('Import SpecOneD Spectrum. Overrides '\n 'current spectrum')\n importSodSpecAction.triggered.connect(lambda: self.import_spectrum(\n mode='SpecOneD'))\n\n importSDSSSpecAction = QAction('Import SDSS spectrum', self)\n # loadAction.setShortcut('Ctrl+L')\n importSDSSSpecAction.setStatusTip('Import SDSS Spectrum. 
Overrides '\n 'current spectrum')\n importSDSSSpecAction.triggered.connect(lambda: self.import_spectrum(\n mode='SDSS'))\n\n normalizeByErrorAction = QAction('Normalize by error', self)\n normalizeByErrorAction.setStatusTip('Normalize the flux density, '\n 'flux density, and object model '\n 'numerical values by the median '\n 'value of the flux density error.'\n ' The physical flux properties will'\n ' be unchanged.')\n normalizeByErrorAction.triggered.connect(\n lambda: self.normalize_spectrum_by_error())\n\n normalizeByFactorAction = QAction('Open normalization menu', self)\n normalizeByFactorAction.setStatusTip('Normalize the flux density, '\n 'flux density, and object model '\n 'numerical values by/to a '\n 'specified factor. The physical '\n 'flux properties will be '\n 'unchanged.')\n normalizeByFactorAction.triggered.connect(\n lambda: self.open_normalize_dialog())\n\n # SpecModel Actions\n addSpecModelAction = QAction('Add SpecModel', self)\n addSpecModelAction.setStatusTip('Add a SpecModel to the fit')\n addSpecModelAction.triggered.connect(self.add_specmodel)\n\n removeSpecModelAction = QAction('Remove current SpecModel', self)\n removeSpecModelAction.setStatusTip('Remove current SpecModel from fit')\n removeSpecModelAction.triggered.connect(self.remove_current_spec_model)\n\n resetSpecModelAction = QAction('Remove all SpecModels', self)\n resetSpecModelAction.setStatusTip('Remove all SpecModels from fit')\n resetSpecModelAction.triggered.connect(self.remove_all_models)\n\n # Fit Actions\n\n runResampleAction = QAction('Run resample and fit', self)\n runResampleAction.setStatusTip('Resample the spectrum, fit all models '\n 'and save the posterior parameter '\n 'distributions.')\n runResampleAction.triggered.connect(self.resample_and_fit)\n\n setResampleAction = QAction('Set resample and fit parameters', self)\n setResampleAction.setStatusTip('Set the resample and fit parameters')\n setResampleAction.triggered.connect(self.open_resample_dialog)\n\n setEmceeAction = QAction('Set MCMC parameters', self)\n setEmceeAction.setStatusTip('Set the MCMC fit parameters')\n setEmceeAction.triggered.connect(self.open_emcee_dialog)\n\n self.statusBar()\n\n mainMenu = self.menuBar()\n mainMenu.setNativeMenuBar(False)\n fileMenu = mainMenu.addMenu('&File')\n # fileMenu.addSeparator()\n fileMenu.addAction(loadAction)\n fileMenu.addAction(saveAction)\n fileMenu.addAction(exitAction)\n\n spectrumMenu = mainMenu.addMenu('&Spectrum')\n spectrumMenu.addAction(importIrafSpecAction)\n spectrumMenu.addAction(importPypeitSpecAction)\n spectrumMenu.addAction(importSodSpecAction)\n spectrumMenu.addAction(importSDSSSpecAction)\n spectrumMenu.addSeparator()\n spectrumMenu.addAction(normalizeByErrorAction)\n spectrumMenu.addAction(normalizeByFactorAction)\n\n specModelMenu = mainMenu.addMenu('&SpecModel')\n specModelMenu.addAction(addSpecModelAction)\n specModelMenu.addAction(removeSpecModelAction)\n specModelMenu.addAction(resetSpecModelAction)\n\n fitMenu = mainMenu.addMenu('&Fit')\n fitMenu.addAction(setEmceeAction)\n fitMenu.addAction(setResampleAction)\n fitMenu.addAction(runResampleAction)\n\n\n # Setup the central widget and and the main tab widget\n self.centralwidget = QWidget()\n self.mainVLayout = QVBoxLayout(self.centralwidget)\n self.mainHLayout = QHBoxLayout()\n\n\n self.setCentralWidget(self.centralwidget)\n self.tabWidget = QTabWidget(self.centralwidget)\n self.tabWidget.setTabShape(QTabWidget.Rounded)\n self.tabWidget.setUsesScrollButtons(False)\n self.tabWidget.setDocumentMode(False)\n 
self.tabWidget.currentChanged.connect(self.tabchanged)\n\n self.mainHLayout.addWidget(self.tabWidget)\n self.mainHLayout.setStretch(1, 2)\n\n self.mainVLayout.addLayout(self.mainHLayout)\n\n self.initialize_main_tab()\n\n # Initialize the Key Press Event for the SpecFitCanvas\n self.gcid = self.specFitCanvas.mpl_connect('key_press_event',\n self.on_press)\n\n self.show()\n\n\n\n def initialize_main_tab(self):\n\n # Initialize the fit tab\n self.fittab = QWidget()\n self.hLayoutFit = QHBoxLayout(self.fittab)\n\n\n self.vLayoutFitProperties = QVBoxLayout()\n\n\n # Add positional input\n self.boxRegionSelect = QGroupBox('Region Select')\n self.labelDispersionPos = QLabel('Dispersion region (Shift + a/d)')\n self.leXposA = QLineEdit('{:.2f}'.format(self.specfit.xlim[0]))\n self.leXposB = QLineEdit('{:.2f}'.format(self.specfit.xlim[1]))\n self.hLayoutXpos = QHBoxLayout()\n self.hLayoutXpos.addWidget(self.leXposA)\n self.hLayoutXpos.addWidget(self.leXposB)\n self.labelFluxPos = QLabel('Flux region (Shift + w/s)')\n self.leYposA = QLineEdit('{:.2e}'.format(self.specfit.ylim[0]))\n self.leYposB = QLineEdit('{:.2e}'.format(self.specfit.ylim[1]))\n self.hLayoutYpos = QHBoxLayout()\n self.hLayoutYpos.addWidget(self.leYposA)\n self.hLayoutYpos.addWidget(self.leYposB)\n\n pos_le = [self.leXposA, self.leXposB, self.leYposA, self.leYposB]\n for le in pos_le:\n le.returnPressed.connect(self.update_region_from_ui)\n\n self.buttonSetX = QPushButton('Set dispersion range (x)')\n self.buttonSetX.clicked.connect(self.set_plot_dispersion)\n self.buttonSetY = QPushButton('Set flux range (y)')\n self.buttonSetY.clicked.connect(self.set_plot_fluxden)\n self.buttonResetPlot = QPushButton('Reset plot (r)')\n self.buttonResetPlot.clicked.connect(self.reset_plot_region)\n\n self.hLayoutRegionButtons = QHBoxLayout()\n self.hLayoutRegionButtons.addWidget(self.buttonSetX)\n self.hLayoutRegionButtons.addWidget(self.buttonSetY)\n self.hLayoutRegionButtons.addWidget(self.buttonResetPlot)\n\n self.vLayoutBoxRegionSelect = QVBoxLayout(self.boxRegionSelect)\n self.vLayoutBoxRegionSelect.addWidget(self.labelDispersionPos)\n self.vLayoutBoxRegionSelect.addLayout(self.hLayoutXpos)\n self.vLayoutBoxRegionSelect.addWidget(self.labelFluxPos)\n self.vLayoutBoxRegionSelect.addLayout(self.hLayoutYpos)\n self.vLayoutBoxRegionSelect.addLayout(self.hLayoutRegionButtons)\n\n\n # Masking\n self.boxMaskSelect = QGroupBox('Masking')\n self.vLayoutBoxMaskSelect = QVBoxLayout(self.boxMaskSelect)\n\n self.boxMaskPreset = QComboBox()\n for mask_preset_key in mask_presets.keys():\n self.boxMaskPreset.addItem(mask_preset_key)\n\n self.buttonLoadPreset = QPushButton('Load mask preset')\n self.buttonLoadPreset.clicked.connect(lambda:\n self.load_mask_preset())\n\n self.hLayoutMaskPreset = QHBoxLayout()\n self.hLayoutMaskPreset.addWidget(self.boxMaskPreset)\n self.hLayoutMaskPreset.addWidget(self.buttonLoadPreset)\n\n self.buttonMask = QPushButton('Mask (m)')\n self.buttonMask.clicked.connect(lambda: self.update_mask(mode='mask'))\n self.buttonUnmask = QPushButton('Unmask (u)')\n self.buttonUnmask.clicked.connect(lambda: self.update_mask(mode='unmask'))\n self.hLayoutMaskAction = QHBoxLayout()\n self.buttonResetMask = QPushButton('Reset mask (Shift + r)')\n self.buttonResetMask.clicked.connect(lambda: self.reset_mask())\n\n self.hLayoutMaskAction.addWidget(self.buttonMask)\n self.hLayoutMaskAction.addWidget(self.buttonUnmask)\n self.hLayoutMaskAction.addWidget(self.buttonResetMask)\n\n self.vLayoutBoxMaskSelect.addLayout(self.hLayoutMaskAction)\n 
self.vLayoutBoxMaskSelect.addLayout(self.hLayoutMaskPreset)\n\n # Super parameters\n # Add global parameter\n self.boxSuperParam = QGroupBox('Super parameters')\n self.vLayoutSuperParam = QVBoxLayout(self.boxSuperParam)\n self.hLayoutAddSuperParam = QHBoxLayout()\n self.hLayoutDelSuperParam = QHBoxLayout()\n self.leSuperParamName = QLineEdit('super_param')\n self.leSuperParamName.setMaxLength(20)\n self.buttonAddSuperParam = QPushButton('Add super parameter')\n self.buttonAddSuperParam.clicked.connect(self.add_super_param)\n\n self.boxSelectSuperParam = QComboBox()\n self.buttonDelSuperParam = QPushButton('Remove super parameter')\n self.buttonDelSuperParam.clicked.connect(self.remove_super_param)\n\n self.hLayoutAddSuperParam.addWidget(self.leSuperParamName)\n self.hLayoutAddSuperParam.addWidget(self.buttonAddSuperParam)\n self.hLayoutDelSuperParam.addWidget(self.boxSelectSuperParam)\n self.hLayoutDelSuperParam.addWidget(self.buttonDelSuperParam)\n\n self.vLayoutSuperParam.addLayout(self.hLayoutAddSuperParam)\n self.vLayoutSuperParam.addLayout(self.hLayoutDelSuperParam)\n\n # Redshift parameter\n self.labelRedshift = QLabel('Redshift')\n self.leRedshift = QLineEdit()\n if self.specfit.redshift is not None:\n self.leRedshift.setText('{}'.format(self.specfit.redshift))\n else:\n self.leRedshift.setText('None')\n self.leRedshift.setMaxLength(20)\n self.leRedshift.returnPressed.connect(self.update_specfit_from_ui)\n\n self.hLayoutRedshift = QHBoxLayout()\n self.hLayoutRedshift.addWidget(self.labelRedshift)\n self.hLayoutRedshift.addWidget(self.leRedshift)\n\n # Fitting\n self.buttonFit = QPushButton('Fit all')\n self.buttonFit.clicked.connect(self.fit)\n self.buttonFitSaveResults = QPushButton('Fit all + Save results')\n self.buttonFitSaveResults.clicked.connect(lambda: self.fit(\n save_results=True))\n self.boxFittingMethod = QComboBox()\n for key in fitting_methods.keys():\n self.boxFittingMethod.addItem(key)\n self.boxFittingMethod.currentTextChanged.connect(\n self.update_fitting_method)\n\n # Build vLayoutFitProperties\n self.vLayoutFitProperties.addWidget(self.boxRegionSelect)\n self.vLayoutFitProperties.addWidget(self.boxMaskSelect)\n self.vLayoutFitProperties.addWidget(self.boxSuperParam)\n self.vLayoutFitProperties.addLayout(self.hLayoutRedshift)\n self.vLayoutFitProperties.addWidget(self.boxFittingMethod)\n self.vLayoutFitProperties.addWidget(self.buttonFit)\n self.vLayoutFitProperties.addWidget(self.buttonFitSaveResults)\n\n # Add SpecFitCanvas\n self.specFitCanvas = SpecFitCanvas(self.specfit)\n self.toolbar = NavigationToolbar(self.specFitCanvas, self)\n self.vLayoutCanvas = QVBoxLayout()\n self.vLayoutCanvas.addWidget(self.specFitCanvas)\n self.vLayoutCanvas.addWidget(self.toolbar)\n\n # Set the ClickFocus active on the SpecFitCanvas\n self.specFitCanvas.setFocusPolicy(QtCore.Qt.ClickFocus)\n self.specFitCanvas.setFocus()\n\n # Super parameter scroll area widget\n self.build_super_param_widget()\n\n # Build hLayoutFit\n self.hLayoutFit.addLayout(self.vLayoutFitProperties)\n self.hLayoutFit.addLayout(self.vLayoutCanvas)\n self.hLayoutFit.setStretch(1, 2)\n\n # Add super parameter tab widget\n self.vLayoutCanvas.addWidget(self.ScArea)\n self.vLayoutCanvas.setStretch(0, 2)\n \n\n self.tabWidget.addTab(self.fittab, \"Fit\")\n\n\n def build_super_param_widget(self):\n\n superParamWidget = QWidget()\n hLayoutModel = QHBoxLayout(superParamWidget)\n\n superParams = self.specfit.super_params\n\n lineditlist = []\n varyboxlist = []\n widgetlist = []\n\n for jdx, param in 
enumerate(superParams):\n\n widgetlist = []\n\n groupBoxParam = QGroupBox(param)\n vLayoutGroupBoxParam = QVBoxLayout(groupBoxParam)\n\n label = QLabel(param)\n linedit = QLineEdit('{:.4E}'.format(superParams[param].value))\n linedit.setMaxLength(20)\n expr_linedit = QLineEdit('{}'.format(superParams[param].expr))\n expr_linedit.setMaxLength(20)\n min_label = QLabel(\"min\")\n min_linedit = QLineEdit('{:.4E}'.format(superParams[param].min))\n min_linedit.setMaxLength(20)\n max_label = QLabel(\"max\")\n max_linedit = QLineEdit('{:.4E}'.format(superParams[param].max))\n max_linedit.setMaxLength(20)\n vary_checkbox = QCheckBox(\"vary\")\n vary_checkbox.setChecked(superParams[param].vary)\n\n widgetlist.extend(\n [label, linedit, expr_linedit, min_label, min_linedit,\n max_label, max_linedit, vary_checkbox])\n lineditlist.extend(\n [linedit, expr_linedit, min_linedit, max_linedit])\n varyboxlist.append(vary_checkbox)\n\n for w in widgetlist:\n vLayoutGroupBoxParam.addWidget(w)\n\n hLayoutModel.addWidget(groupBoxParam)\n\n vary_checkbox.stateChanged.connect(self.update_super_params_from_ui)\n\n if widgetlist:\n self.super_params_widgetlist = widgetlist\n self.super_params_lineditlist = lineditlist\n self.super_params_varybox_list = varyboxlist\n\n # Activate input for lineEdit lists\n for l in lineditlist:\n l.returnPressed.connect(self.update_super_params_from_ui)\n\n # Add model parameters in scroll area\n if hasattr(self, 'ScArea'):\n self.vLayoutCanvas.removeWidget(self.ScArea)\n self.deleteItemsOfLayout(self.ScArea.layout())\n else:\n self.ScArea = QScrollArea()\n self.ScArea.setLayout(QHBoxLayout())\n self.ScArea.setWidgetResizable(True)\n self.ScArea.setHorizontalScrollBarPolicy(\n QtCore.Qt.ScrollBarAlwaysOn)\n\n self.ScArea.setWidget(superParamWidget)\n\n self.vLayoutCanvas.addWidget(self.ScArea)\n\n def on_press(self, event):\n\n if event.key == 'm':\n\n self.update_mask(mode='mask')\n\n elif event.key == 'u':\n\n self.update_mask(mode='unmask')\n\n elif event.key == 'f':\n\n self.fit()\n\n elif event.key == 'A':\n\n self.x_pos_a = event.xdata\n self.leXposA.setText('{:.2f}'.format(self.x_pos_a))\n\n\n elif event.key == 'D':\n\n self.x_pos_b = event.xdata\n self.leXposB.setText('{:.2f}'.format(self.x_pos_b))\n\n elif event.key == 'W':\n\n self.y_pos_b = event.ydata\n self.leYposB.setText('{:.2e}'.format(self.y_pos_b))\n\n\n elif event.key == 'S':\n\n self.y_pos_a = event.ydata\n self.leYposA.setText('{:.2e}'.format(self.y_pos_a))\n\n # Zoom into X-axis region\n elif event.key == 'x':\n\n self.set_plot_dispersion()\n\n elif event.key == 'y':\n\n self.set_plot_fluxden()\n\n # Reset the region values\n elif event.key == 'r':\n\n self.reset_plot_region()\n\n # Full reset (region + mask)\n elif event.key == 'R':\n self.reset_mask()\n\n # --------------------------------------------------------------------------\n # Region Actions\n # --------------------------------------------------------------------------\n\n def set_plot_dispersion(self):\n\n self.update_region_from_ui()\n self.specfit.xlim = [self.x_pos_a, self.x_pos_b]\n self.update_specfit_plot()\n\n def set_plot_fluxden(self):\n\n self.update_region_from_ui()\n self.specfit.ylim = [self.y_pos_a, self.y_pos_b]\n self.update_specfit_plot()\n\n def reset_plot_region(self):\n\n self.reset_region()\n self.update_specfit_plot()\n\n def reset_region(self):\n\n if self.specfit.spec is not None:\n self.x_pos_a = min(self.specfit.spec.dispersion)\n self.x_pos_b = max(self.specfit.spec.dispersion)\n self.y_pos_a = 
min(self.specfit.spec.fluxden)\n self.y_pos_b = max(self.specfit.spec.fluxden)\n\n else:\n self.x_pos_a = 0\n self.x_pos_b = 1\n self.y_pos_a = 0\n self.y_pos_b = 1\n\n # Excluded, to get back to previous region\n # self.leXposA.setText('{:.2f}'.format(self.x_pos_a))\n # self.leXposB.setText('{:.2f}'.format(self.x_pos_b))\n # self.leYposA.setText('{:.2E}'.format(self.y_pos_a))\n # self.leYposB.setText('{:.2E}'.format(self.y_pos_b))\n\n self.specfit.xlim = [self.x_pos_a, self.x_pos_b]\n self.specfit.ylim = [self.y_pos_a, self.y_pos_b]\n\n def update_region_from_ui(self):\n\n self.x_pos_a = update_float_variable_from_linedit(self.leXposA,\n self.x_pos_a)\n self.x_pos_b = update_float_variable_from_linedit(self.leXposB,\n self.x_pos_b)\n self.y_pos_a = update_float_variable_from_linedit(self.leYposA,\n self.y_pos_a,\n expression='{:.2E}')\n self.y_pos_b = update_float_variable_from_linedit(self.leYposB,\n self.y_pos_b,\n expression='{:.2E}')\n\n self.specFitCanvas.setFocus()\n\n # --------------------------------------------------------------------------\n # Mask Actions\n # --------------------------------------------------------------------------\n\n def update_mask(self, mode='mask'):\n\n self.x_pos_a = float(self.leXposA.text())\n self.x_pos_b = float(self.leXposB.text())\n\n self.mask_region(mode)\n\n self.update_specfit_plot()\n\n def mask_region(self, mode='mask'):\n\n if self.specfit.spec is not None:\n\n mask_between = np.sort(np.array([self.x_pos_a,\n self.x_pos_b]))\n\n spec = self.specfit.spec\n lo_index = np.argmin(np.abs(spec.dispersion - mask_between[0]))\n up_index = np.argmin(np.abs(spec.dispersion - mask_between[1]))\n if mode == 'mask':\n self.specfit.spec.mask[lo_index:up_index] = False\n elif mode == 'unmask':\n self.specfit.spec.mask[lo_index:up_index] = True\n\n self.specfit.update_specmodel_spectra()\n\n\n def reset_mask(self):\n\n self.reset_region()\n self.specfit.spec.reset_mask()\n self.update_specfit_plot()\n\n self.specfit.update_specmodel_spectra()\n\n\n def load_mask_preset(self):\n\n mask_preset_key = self.boxMaskPreset.currentText()\n\n mask_preset = mask_presets[mask_preset_key]\n\n if mask_preset['rest_frame']:\n one_p_z = 1 + self.specfit.redshift\n else:\n one_p_z = 1\n\n for mask_range in mask_preset['mask_ranges']:\n wave_a = mask_range[0] * one_p_z\n wave_b = mask_range[1] * one_p_z\n\n if self.specfit.spec is not None:\n self.specfit.spec.mask_between([wave_a, wave_b])\n\n self.update_specfit_plot()\n\n def update_specfit_from_ui(self):\n\n # update redshift\n self.specfit.redshift = float(self.leRedshift.text())\n self.specfit.update_specmodels()\n self.leRedshift.setText('{:.4f}'.format(self.specfit.redshift))\n\n self.specFitCanvas.plot(self.specfit)\n self.specFitCanvas.setFocus()\n\n # --------------------------------------------------------------------------\n # Super Param Actions\n # --------------------------------------------------------------------------\n\n def update_boxSelectSuperParam(self):\n \"\"\" Only used when loading a model to update the Combobox\"\"\"\n\n for param in self.specfit.super_params:\n if self.boxSelectSuperParam.findText(param) < 0:\n self.boxSelectSuperParam.addItem(param)\n\n def rebuild_super_params_widget(self):\n\n layout = self.ScArea.layout()\n self.deleteItemsOfLayout(layout)\n\n self.build_super_param_widget()\n\n def add_super_param(self):\n\n param_name = self.leSuperParamName.text()\n # Check if item already in QComboBox\n if self.boxSelectSuperParam.findText(param_name) < 0:\n # Add item to QComboBox\n 
self.boxSelectSuperParam.addItem(param_name)\n # Add param to SpecModel\n self.specfit.add_super_param(param_name)\n\n self.rebuild_super_params_widget()\n self.update_specmodelwidgets_for_global_params()\n\n def remove_super_param(self):\n\n param_name = self.boxSelectSuperParam.currentText()\n # Remove global param from SpecModel\n self.specfit.remove_super_param(param_name)\n # Remove item from QComboBox\n idx = self.boxSelectSuperParam.findText(param_name)\n self.boxSelectSuperParam.removeItem(idx)\n\n self.rebuild_super_params_widget()\n self.update_specmodelwidgets_for_global_params()\n\n\n def update_specmodelwidgets_for_global_params(self):\n\n for idx in range(self.tabWidget.count() - 1):\n self.tabWidget.setCurrentIndex(idx + 1)\n specmodel_widget = self.tabWidget.currentWidget()\n specmodel_widget.rebuild_global_params_tab()\n specmodel_widget.rebuild_model_tabs()\n\n self.tabWidget.setCurrentIndex(0)\n self.specFitCanvas.setFocus()\n\n # --------------------------------------------------------------------------\n # Spectrum Actions\n # --------------------------------------------------------------------------\n\n def normalize_spectrum_by_error(self):\n\n self.specfit.normalize_spectrum_by_error()\n current_widget = self.tabWidget.currentWidget()\n if self.tabWidget.currentIndex() > 0:\n current_widget.reset_plot_region()\n else:\n self.reset_plot_region()\n\n def normalize_spectrum_to_factor(self, factor):\n\n self.specfit.normalize_spectrum_to_factor(factor)\n current_widget = self.tabWidget.currentWidget()\n if self.tabWidget.currentIndex() > 0:\n current_widget.reset_plot_region()\n else:\n self.reset_plot_region()\n\n def normalize_spectrum_by_factor(self, factor):\n\n self.specfit.normalize_spectrum_by_factor(factor)\n current_widget = self.tabWidget.currentWidget()\n if self.tabWidget.currentIndex() > 0:\n current_widget.reset_plot_region()\n else:\n self.reset_plot_region()\n\n\n\n # --------------------------------------------------------------------------\n # Fit Actions\n # --------------------------------------------------------------------------\n\n def update_fitting_method(self, value):\n\n self.specfit.fitting_method = value\n\n def fit(self, save_results=False):\n\n if len(self.specfit.specmodels) > 0 and self.specfit.spec is not None:\n\n if save_results:\n # Select save folder\n foldername = str(\n QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n if foldername:\n self.specfit.fit(save_results=save_results,\n foldername=foldername)\n self.specFitCanvas.fig.savefig(foldername+'/SpecFit.png')\n else:\n self.statusBar().showMessage('No SAVE folder selected.')\n else:\n self.specfit.fit()\n\n self.rebuild_super_params_widget()\n self.update_specfit_plot()\n\n def resample_and_fit(self):\n\n if len(self.specfit.specmodels) == 0:\n self.statusBar().showMessage('[ERROR] No SpecModel found. 
Cannot '\n 'resample and fit spectrum.')\n\n else:\n\n self.specfit.resample(n_samples=self.nsamples,\n foldername=self.resample_foldername,\n save_result_plots=self.save_result_plots,\n seed=self.resample_seed)\n\n def open_resample_dialog(self):\n\n self.resampleDialog = ResampleWindow(self)\n self.resampleDialog.show()\n\n def open_emcee_dialog(self):\n\n self.emceeDialog = EmceeWindow(self)\n self.emceeDialog.show()\n\n def open_normalize_dialog(self):\n self.normalizeDialog = NormalizeWindow(self)\n self.normalizeDialog.show()\n\n # --------------------------------------------------------------------------\n # File Actions\n # --------------------------------------------------------------------------\n\n def closeEvent(self, event):\n \"\"\"This function modifies the standard closing of the GUI\n\n Parameters\n ----------\n event : event\n The closing event.\n \"\"\"\n\n result = QtWidgets.QMessageBox.question(self,\n \"Exit Dialog\",\n \"Are you sure you want to exit ?\",\n QtWidgets.QMessageBox.Yes| QtWidgets.QMessageBox.No)\n event.ignore()\n\n if result == QtWidgets.QMessageBox.Yes:\n event.accept()\n\n def save(self):\n\n # Select save folder\n foldername = str(\n QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n\n if foldername:\n self.specfit.save(foldername)\n else:\n self.statusBar().showMessage('[WARNING] No save directory '\n 'selected.')\n\n\n def load(self):\n\n # Select load folder\n foldername = str(\n QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n\n if foldername:\n # Remove all SpecModels\n self.remove_all_models()\n\n self.specfit.load(foldername)\n\n # Update dispersion and flux density limits from specfit\n self.leXposA.setText('{:.2f}'.format(self.specfit.xlim[0]))\n self.leXposB.setText('{:.2f}'.format(self.specfit.xlim[1]))\n self.leYposA.setText('{:.2e}'.format(self.specfit.ylim[0]))\n self.leYposB.setText('{:.2e}'.format(self.specfit.ylim[1]))\n\n # Rebuild TabWidget with loaded SpecModels\n self.add_specmodels_in_specfit()\n\n self.tabWidget.setCurrentIndex(0)\n # self.reset_region()\n self.update_specfit_plot()\n self.update_boxSelectSuperParam()\n self.rebuild_super_params_widget()\n\n # Update redshift value from specfit\n self.leRedshift.setText('{}'.format(self.specfit.redshift))\n\n # Update fitting method from specfit\n index = self.boxFittingMethod.findText(self.specfit.fitting_method,\n QtCore.Qt.MatchFixedString)\n if index >= 0:\n self.boxFittingMethod.setCurrentIndex(index)\n\n\n else:\n self.statusBar().showMessage('[WARNING] No directory to '\n 'load a model from selected.')\n\n\n # --------------------------------------------------------------------------\n # Spectrum Actions\n # --------------------------------------------------------------------------\n\n def import_spectrum(self, mode='IRAF'):\n \"\"\" Import spectrum \"\"\"\n\n # Select spectrum\n fileName, fileFilter = QFileDialog.getOpenFileName(self, \"Import \"\n \"spectrum\")\n\n if fileName:\n self.specfit.import_spectrum(fileName, filetype=mode)\n\n # Re-plot main canvas -> later update fittab\n self.reset_region()\n self.specFitCanvas.plot(self.specfit)\n\n\n\n # Re-plot all SpecModel tabs\n for idx in range(self.tabWidget.count()-1):\n\n self.tabWidget.setCurrentIndex(idx+1)\n specmodel_widget = self.tabWidget.currentWidget()\n specmodel_widget.reset_plot_region()\n specmodel_widget.update_specmodel_plot()\n\n\n\n\n # --------------------------------------------------------------------------\n # SpecModel Actions\n # 
--------------------------------------------------------------------------\n\n def add_specmodel(self):\n\n # Add SpecModel to SpecFit\n self.specfit.add_specmodel()\n\n specmodel_widget = SpecModelWidget(self, self.specfit.specmodels[-1])\n\n self.add_specmodel_tab(specmodel_widget)\n\n def add_specmodel_tab(self, specmodel_widget, specmodel_name='SpecModel'):\n\n index = self.tabWidget.addTab(specmodel_widget, specmodel_name)\n self.tabWidget.setCurrentIndex(index)\n\n def add_specmodels_in_specfit(self):\n\n for specmodel in self.specfit.specmodels:\n specmodel_widget = SpecModelWidget(self, specmodel)\n self.add_specmodel_tab(specmodel_widget,\n specmodel_name=specmodel.name)\n\n specmodel_widget.rebuild_model_tabs()\n specmodel_widget.update_boxSelectGlobalParam()\n\n def remove_all_models(self):\n\n for tabindex in reversed(range(self.tabWidget.count())[1:]):\n self.remove_spec_model(tabindex)\n\n def remove_current_spec_model(self):\n\n # Get current SpecModel index from TabWidget\n tabindex = self.tabWidget.currentIndex()\n self.remove_spec_model(tabindex)\n\n # TODO UPDATE ALL TABS WITH REGARD TO THE PROPAGATED SPECTRUM\n\n def remove_spec_model(self, index):\n\n if index > 0:\n\n # Remove SpecModel from SpecFit\n self.specfit.delete_specmodel(index-1)\n # Update the TabWidget accordingly\n specmodel_widget = self.tabWidget.currentWidget()\n specmodel_widget.deleteLater()\n self.tabWidget.removeTab(index)\n\n else:\n self.statusBar().showMessage('Current tab does not contain a '\n 'SpecModel')\n\n # --------------------------------------------------------------------------\n # Update Actions (SpecModelWidget, SpecModel, TabWidget)\n # --------------------------------------------------------------------------\n\n def update_specfit_plot(self):\n self.specFitCanvas.plot(self.specfit)\n\n def tabchanged(self):\n\n # Get current tab widget\n idx = self.tabWidget.currentIndex()\n\n # Update the tab\n if idx == 0:\n self.specFitCanvas.plot(self.specfit)\n self.update_specfit_plot()\n self.specFitCanvas.setFocus()\n\n elif idx > 0:\n self.specfit.update_specmodel_spectra()\n self.update_specmodel_tab(idx)\n\n def update_specmodel_tab(self, index):\n\n self.tabWidget.setCurrentIndex(index)\n\n # UPDATE SpecMODEL???\n\n specmodel_widget = self.tabWidget.currentWidget()\n # specmodel_widget.reset_plot_region()\n specmodel_widget.update_specmodel_plot()\n specmodel_widget.specModelCanvas.setFocus()\n\n\n def update_super_params_from_ui(self):\n\n for jdx, param in enumerate(self.specfit.super_params):\n\n new_value = float(\n self.super_params_lineditlist[jdx * 4 + 0].text())\n new_expr = self.super_params_lineditlist[jdx * 4 + 1].text()\n new_min = float(\n self.super_params_lineditlist[jdx * 4 + 2].text())\n new_max = float(\n self.super_params_lineditlist[jdx * 4 + 3].text())\n new_vary = self.super_params_varybox_list[jdx].isChecked()\n\n if new_expr == 'None':\n new_expr = None\n\n for specmodel in self.specfit.specmodels:\n\n # Set the new parameter values\n specmodel.global_params[param].set(value=new_value,\n expr=new_expr,\n min=new_min,\n max=new_max,\n vary=new_vary)\n\n self.update_specmodelwidgets_for_global_params()\n\n\n def deleteItemsOfLayout(self, layout):\n if layout is not None:\n while layout.count():\n item = layout.takeAt(0)\n widget = item.widget()\n if widget is not None:\n widget.setParent(None)\n else:\n self.deleteItemsOfLayout(item.layout())"
] |
[
[
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.array",
"numpy.abs"
]
] |
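A minimal sketch (assumed grid values, not sculptor's API) of the nearest-pixel lookup behind mask_region() above, which is where the indexed numpy.array and numpy.abs calls appear; in the GUI this numeric step sits behind a matplotlib canvas paired with NavigationToolbar2QT.

import numpy as np

# Hypothetical wavelength grid and pixel mask; sculptor keeps these on its spectrum object.
dispersion = np.linspace(4000.0, 9000.0, 5001)
mask = np.ones(dispersion.shape, dtype=bool)  # True = pixel included in the fit

# Sort the two user-selected bounds so either click order yields a valid region.
bounds = np.sort(np.array([5500.0, 5100.0]))
lo_index = np.argmin(np.abs(dispersion - bounds[0]))  # pixel nearest the lower bound
up_index = np.argmin(np.abs(dispersion - bounds[1]))  # pixel nearest the upper bound
mask[lo_index:up_index] = False  # 'mask' mode excludes the region, as in mask_region()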
mohamed-ezz/tflite_workshop_guc
|
[
"9a201072e9cc6ad1c849dc88e541900398e036c5"
] |
[
"src/keras_layers/keras_layer_DecodeDetections1.py"
] |
[
"'''\nA custom Keras layer to decode the raw SSD prediction output. Corresponds to the\n`DetectionOutput` layer type in the original Caffe implementation of SSD.\n\nCopyright (C) 2018 Pierluigi Ferrari\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport tensorflow as tf\nimport keras.backend as K\nfrom keras.engine.topology import InputSpec\nfrom keras.engine.topology import Layer\n\nclass DecodeDetections(Layer):\n '''\n A Keras layer to decode the raw SSD prediction output.\n\n Input shape:\n 3D tensor of shape `(batch_size, n_boxes, n_classes + 12)`.\n\n Output shape:\n 3D tensor of shape `(batch_size, top_k, 6)`.\n '''\n\n def __init__(self,\n confidence_thresh=0.01,\n iou_threshold=0.45,\n top_k=200,\n nms_max_output_size=400,\n coords='centroids',\n normalize_coords=True,\n img_height=None,\n img_width=None,\n **kwargs):\n '''\n All default argument values follow the Caffe implementation.\n\n Arguments:\n confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific\n positive class in order to be considered for the non-maximum suppression stage for the respective class.\n A lower value will result in a larger part of the selection process being done by the non-maximum suppression\n stage, while a larger value will result in a larger part of the selection process happening in the confidence\n thresholding stage.\n iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold`\n with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers\n to the box score.\n top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the\n non-maximum suppression stage.\n nms_max_output_size (int, optional): The maximum number of predictions that will be left after performing non-maximum\n suppression.\n coords (str, optional): The box coordinate format that the model outputs. Must be 'centroids'\n i.e. the format `(cx, cy, w, h)` (box center coordinates, width, and height). Other coordinate formats are\n currently not supported.\n normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])\n and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs\n relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.\n Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect\n coordinates. Requires `img_height` and `img_width` if set to `True`.\n img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.\n img_width (int, optional): The width of the input images. 
Only needed if `normalize_coords` is `True`.\n '''\n if K.backend() != 'tensorflow':\n raise TypeError(\"This layer only supports TensorFlow at the moment, but you are using the {} backend.\".format(K.backend()))\n\n if normalize_coords and ((img_height is None) or (img_width is None)):\n raise ValueError(\"If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`\".format(img_height, img_width))\n\n if coords != 'centroids':\n raise ValueError(\"The DetectionOutput layer currently only supports the 'centroids' coordinate format.\")\n\n # We need these members for the config.\n self.confidence_thresh = confidence_thresh\n self.iou_threshold = iou_threshold\n self.top_k = top_k\n self.normalize_coords = normalize_coords\n self.img_height = img_height\n self.img_width = img_width\n self.coords = coords\n self.nms_max_output_size = nms_max_output_size\n\n # We need these members for TensorFlow.\n self.tf_confidence_thresh = tf.constant(self.confidence_thresh, name='confidence_thresh')\n self.tf_iou_threshold = tf.constant(self.iou_threshold, name='iou_threshold')\n self.tf_top_k = tf.constant(self.top_k, name='top_k')\n self.tf_normalize_coords = tf.constant(self.normalize_coords, name='normalize_coords')\n self.tf_img_height = tf.constant(self.img_height, dtype=tf.float32, name='img_height')\n self.tf_img_width = tf.constant(self.img_width, dtype=tf.float32, name='img_width')\n self.tf_nms_max_output_size = tf.constant(self.nms_max_output_size, name='nms_max_output_size')\n\n super(DecodeDetections, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n super(DecodeDetections, self).build(input_shape)\n\n def call(self, y_pred, mask=None):\n '''\n Returns:\n 3D tensor of shape `(batch_size, top_k, 6)`. The second axis is zero-padded\n to always yield `top_k` predictions per batch item. The last axis contains\n the coordinates for each predicted box in the format\n `[class_id, confidence, xmin, ymin, xmax, ymax]`.\n '''\n\n #####################################################################################\n # 1. 
Convert the box coordinates from predicted anchor box offsets to predicted\n # absolute coordinates\n #####################################################################################\n\n # Convert anchor box offsets to image offsets.\n cx = y_pred[...,-12] * y_pred[...,-4] * y_pred[...,-6] + y_pred[...,-8] # cx = cx_pred * cx_variance * w_anchor + cx_anchor\n cy = y_pred[...,-11] * y_pred[...,-3] * y_pred[...,-5] + y_pred[...,-7] # cy = cy_pred * cy_variance * h_anchor + cy_anchor\n w = tf.exp(y_pred[...,-10] * y_pred[...,-2]) * y_pred[...,-6] # w = exp(w_pred * variance_w) * w_anchor\n h = tf.exp(y_pred[...,-9] * y_pred[...,-1]) * y_pred[...,-5] # h = exp(h_pred * variance_h) * h_anchor\n\n # Convert 'centroids' to 'corners'.\n xmin = cx - 0.5 * w\n ymin = cy - 0.5 * h\n xmax = cx + 0.5 * w\n ymax = cy + 0.5 * h\n\n # If the model predicts box coordinates relative to the image dimensions and they are supposed\n # to be converted back to absolute coordinates, do that.\n def normalized_coords():\n xmin1 = tf.expand_dims(xmin * self.tf_img_width, axis=-1)\n ymin1 = tf.expand_dims(ymin * self.tf_img_height, axis=-1)\n xmax1 = tf.expand_dims(xmax * self.tf_img_width, axis=-1)\n ymax1 = tf.expand_dims(ymax * self.tf_img_height, axis=-1)\n return xmin1, ymin1, xmax1, ymax1\n def non_normalized_coords():\n return tf.expand_dims(xmin, axis=-1), tf.expand_dims(ymin, axis=-1), tf.expand_dims(xmax, axis=-1), tf.expand_dims(ymax, axis=-1)\n\n if self.normalize_coords:\n xmin, ymin, xmax, ymax = normalized_coords()\n else:\n xmin, ymin, xmax, ymax = non_normalized_coords()\n\n # Concatenate the one-hot class confidences and the converted box coordinates to form the decoded predictions tensor.\n y_pred = tf.concat(values=[y_pred[...,:-12], xmin, ymin, xmax, ymax], axis=-1)\n\n #####################################################################################\n # 2. Perform confidence thresholding, per-class non-maximum suppression, and\n # top-k filtering.\n #####################################################################################\n\n batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32\n n_boxes = tf.shape(y_pred)[1]\n n_classes = y_pred.shape[2] - 4\n class_indices = tf.range(1, n_classes)\n\n # Create a function that filters the predictions for the given batch item. 
Specifically, it performs:\n        # - confidence thresholding\n        # - non-maximum suppression (NMS)\n        # - top-k filtering\n        def filter_predictions(batch_item):\n\n            # Create a function that filters the predictions for one single class.\n            def filter_single_class(index):\n\n                # From a tensor of shape (n_boxes, n_classes + 4 coordinates) extract\n                # a tensor of shape (n_boxes, 1 + 4 coordinates) that contains the\n                # confidence values for just one class, determined by `index`.\n                confidences = tf.expand_dims(batch_item[..., index], axis=-1)\n                class_id = tf.fill(dims=tf.shape(confidences), value=tf.to_float(index))\n                box_coordinates = batch_item[...,-4:]\n\n                single_class = tf.concat([class_id, confidences, box_coordinates], axis=-1)\n\n                # Apply confidence thresholding with respect to the class defined by `index`.\n                threshold_met = single_class[:,1] > self.tf_confidence_thresh\n                single_class = tf.boolean_mask(tensor=single_class,\n                                               mask=threshold_met)\n\n                # If any boxes made the threshold, perform NMS.\n                def perform_nms():\n                    scores = single_class[...,1]\n\n                    # `tf.image.non_max_suppression()` needs the box coordinates in the format `(ymin, xmin, ymax, xmax)`.\n                    xmin = tf.expand_dims(single_class[...,-4], axis=-1)\n                    ymin = tf.expand_dims(single_class[...,-3], axis=-1)\n                    xmax = tf.expand_dims(single_class[...,-2], axis=-1)\n                    ymax = tf.expand_dims(single_class[...,-1], axis=-1)\n                    boxes = tf.concat(values=[ymin, xmin, ymax, xmax], axis=-1)\n\n                    maxima_indices = tf.image.non_max_suppression(boxes=boxes,\n                                                                  scores=scores,\n                                                                  max_output_size=self.tf_nms_max_output_size,\n                                                                  iou_threshold=self.iou_threshold,\n                                                                  name='non_maximum_suppression')\n                    maxima = tf.gather(params=single_class,\n                                       indices=maxima_indices,\n                                       axis=0)\n                    return maxima\n\n                def no_confident_predictions():\n                    return tf.constant(value=0.0, shape=(1,6))\n\n                single_class_nms = perform_nms()\n\n                # Make sure `single_class` is exactly `self.nms_max_output_size` elements long.\n                padded_single_class = tf.pad(tensor=single_class_nms,\n                                             paddings=[[0, self.tf_nms_max_output_size - tf.shape(single_class_nms)[0]], [0, 0]],\n                                             mode='CONSTANT',\n                                             constant_values=0.0)\n\n                return padded_single_class\n\n            # Iterate `filter_single_class()` over all class indices.\n            filtered_single_classes = tf.map_fn(fn=lambda i: filter_single_class(i),\n                                                elems=tf.range(1,n_classes),\n                                                dtype=tf.float32,\n                                                parallel_iterations=128,\n                                                back_prop=False,\n                                                swap_memory=False,\n                                                infer_shape=True,\n                                                name='loop_over_classes')\n\n            # Concatenate the filtered results for all individual classes to one tensor.\n            filtered_predictions = tf.reshape(tensor=filtered_single_classes, shape=(-1,6))\n\n            # Perform top-k filtering for this batch item or pad it in case there are\n            # fewer than `self.top_k` boxes left at this point. Either way, produce a\n            # tensor of length `self.top_k`. By the time we return the final results tensor\n            # for the whole batch, all batch items must have the same number of predicted\n            # boxes so that the tensor dimensions are homogeneous. 
If fewer than `self.top_k`\n # predictions are left after the filtering process above, we pad the missing\n # predictions with zeros as dummy entries.\n def top_k():\n return tf.gather(params=filtered_predictions,\n indices=tf.nn.top_k(filtered_predictions[:, 1], k=self.tf_top_k, sorted=True).indices,\n axis=0)\n def pad_and_top_k():\n padded_predictions = tf.pad(tensor=filtered_predictions,\n paddings=[[0, self.tf_top_k - tf.shape(filtered_predictions)[0]], [0, 0]],\n mode='CONSTANT',\n constant_values=0.0)\n return tf.gather(params=padded_predictions,\n indices=tf.nn.top_k(padded_predictions[:, 1], k=self.tf_top_k, sorted=True).indices,\n axis=0)\n\n top_k_boxes = pad_and_top_k()\n\n return top_k_boxes\n\n # Iterate `filter_predictions()` over all batch items.\n output_tensor = tf.map_fn(fn=lambda x: filter_predictions(x),\n elems=y_pred,\n dtype=None,\n parallel_iterations=128,\n back_prop=False,\n swap_memory=False,\n infer_shape=True,\n name='loop_over_batch')\n\n return output_tensor\n\n def compute_output_shape(self, input_shape):\n batch_size, n_boxes, last_axis = input_shape\n return (batch_size, self.tf_top_k, 6) # Last axis: (class_ID, confidence, 4 box coordinates)\n\n def get_config(self):\n config = {\n 'confidence_thresh': self.confidence_thresh,\n 'iou_threshold': self.iou_threshold,\n 'top_k': self.top_k,\n 'nms_max_output_size': self.nms_max_output_size,\n 'coords': self.coords,\n 'normalize_coords': self.normalize_coords,\n 'img_height': self.img_height,\n 'img_width': self.img_width,\n }\n base_config = super(DecodeDetections, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))"
] |
[
[
"tensorflow.boolean_mask",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.shape",
"tensorflow.image.non_max_suppression",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.exp",
"tensorflow.gather",
"tensorflow.nn.top_k",
"tensorflow.to_float"
]
] |
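A minimal TensorFlow sketch (toy values, eager mode; not the layer itself) of the record's core decode-and-filter steps: tf.exp recovers width/height from anchor-relative offsets, centroids become (ymin, xmin, ymax, xmax) corners for tf.image.non_max_suppression, and tf.pad plus tf.nn.top_k produce a fixed-length, score-sorted output as in pad_and_top_k().

import tensorflow as tf

# Toy box centers and confidences for three candidate boxes.
cx = tf.constant([0.50, 0.52, 0.90])
cy = tf.constant([0.50, 0.51, 0.10])
# Width/height decode through tf.exp, mirroring w = exp(w_pred * var_w) * w_anchor.
w = tf.exp(tf.constant([0.00, 0.05, 0.00])) * tf.constant([0.20, 0.20, 0.10])
h = tf.exp(tf.constant([0.00, 0.00, 0.00])) * tf.constant([0.20, 0.20, 0.10])
scores = tf.constant([0.9, 0.8, 0.7])

# Convert 'centroids' to the (ymin, xmin, ymax, xmax) order NMS expects.
boxes = tf.stack([cy - 0.5 * h, cx - 0.5 * w, cy + 0.5 * h, cx + 0.5 * w], axis=-1)

keep = tf.image.non_max_suppression(boxes, scores,
                                    max_output_size=2, iou_threshold=0.45)
kept_scores = tf.gather(scores, keep)  # boxes 0 and 1 overlap heavily, so one is suppressed

# Zero-pad to a fixed length, then rank by score, as pad_and_top_k() does.
top_k = 2
padded = tf.pad(kept_scores, [[0, top_k - tf.shape(kept_scores)[0]]])
best = tf.nn.top_k(padded, k=top_k, sorted=True).values

The layer applies this same pattern per class and per batch item via the tf.map_fn loops shown in the code cell above.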
BirdVox/mirdata
|
[
"42d9fddb5b84ad95b6fb15a56f1b572c77db9eb3"
] |
[
"tests/test_jams_utils.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\nimport jams\n\nfrom mirdata import jams_utils, utils\n\n\ndef get_jam_data(jam, namespace, annot_numb):\n time = []\n duration = []\n value = []\n confidence = []\n for obs in jam.search(namespace=namespace)[annot_numb]['data']:\n time.append(obs.time)\n duration.append(round(obs.duration, 3))\n value.append(obs.value)\n confidence.append(obs.confidence)\n return time, duration, value, confidence\n\n\ndef test_beats():\n\n beat_data_1 = [(utils.BeatData(np.array([0.2, 0.3]), np.array([1, 2])), None)]\n beat_data_2 = [(utils.BeatData(np.array([0.5, 0.7]), np.array([2, 3])), 'beats_2')]\n beat_data_3 = [\n (utils.BeatData(np.array([0.0, 0.3]), np.array([1, 2])), 'beats_1'),\n (utils.BeatData(np.array([0.5, 0.13]), np.array([4, 3])), 'beats_2'),\n ]\n beat_data_4 = (utils.BeatData(np.array([0.0, 0.3]), np.array([1, 2])), 'beats_1')\n beat_data_5 = [\n (utils.BeatData(np.array([0.0, 0.3]), np.array([1, 2])), 'beats_1'),\n [utils.BeatData(np.array([0.5, 0.13]), np.array([4, 3])), 'beats_2'],\n ]\n beat_data_6 = [(None, None)]\n beat_data_7 = [\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n )\n ]\n\n jam_1 = jams_utils.jams_converter(beat_data=beat_data_1)\n jam_2 = jams_utils.jams_converter(beat_data=beat_data_2)\n jam_3 = jams_utils.jams_converter(beat_data=beat_data_3)\n jam_6 = jams_utils.jams_converter(beat_data=beat_data_6)\n\n time, duration, value, confidence = get_jam_data(jam_1, 'beat', 0)\n assert time == [0.2, 0.3]\n assert duration == [0.0, 0.0]\n assert value == [1, 2]\n assert confidence == [None, None]\n\n assert jam_2.annotations[0]['sandbox']['name'] == 'beats_2'\n\n time, duration, value, confidence = get_jam_data(jam_3, 'beat', 0)\n assert time == [0.0, 0.3]\n assert duration == [0.0, 0.0]\n assert value == [1, 2]\n assert confidence == [None, None]\n\n time, duration, value, confidence = get_jam_data(jam_3, 'beat', 1)\n assert time == [0.13, 0.5]\n assert duration == [0.0, 0.0]\n assert value == [3, 4]\n assert confidence == [None, None]\n\n time, duration, value, confidence = get_jam_data(jam_6, 'beat', 0)\n assert time == []\n assert duration == []\n assert value == []\n assert confidence == []\n\n assert type(jam_1) == jams.JAMS\n\n with pytest.raises(TypeError):\n jams_utils.jams_converter(beat_data=beat_data_4)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(beat_data=beat_data_5)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(beat_data=beat_data_7)\n\n\ndef test_chords():\n chord_data_1 = [\n (\n utils.ChordData(\n np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array(['A', 'A', 'E']),\n ),\n None,\n )\n ]\n chord_data_2 = [\n (\n utils.ChordData(\n np.array([[0.0, 0.8, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array(['A', 'B', 'C']),\n ),\n 'chords_2',\n )\n ]\n chord_data_3 = [\n (\n utils.ChordData(\n np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array(['A', 'A', 'E']),\n ),\n 'chords_1',\n ),\n (\n utils.ChordData(\n np.array([[0.0, 0.7, 1.0], [0.7, 1.0, 1.5]]).T,\n np.array(['A', 'B', 'C']),\n ),\n 'chords_2',\n ),\n ]\n chord_data_4 = (\n utils.ChordData(\n np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 1.5]]).T, np.array(['A', 'A', 'E'])\n ),\n None,\n )\n chord_data_5 = [\n [\n utils.ChordData(\n np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array(['A', 'A', 'E']),\n ),\n None,\n ],\n (\n utils.ChordData(\n np.array([[0.0, 0.8, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array(['A', 'B', 'C']),\n ),\n 
'chords_2',\n ),\n ]\n chord_data_6 = [(None, None)]\n chord_data_7 = [\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n )\n ]\n\n jam_1 = jams_utils.jams_converter(chord_data=chord_data_1)\n jam_2 = jams_utils.jams_converter(chord_data=chord_data_2)\n jam_3 = jams_utils.jams_converter(chord_data=chord_data_3)\n jam_6 = jams_utils.jams_converter(chord_data=chord_data_6)\n\n time, duration, value, confidence = get_jam_data(jam_1, 'chord', 0)\n assert time == [0.0, 0.5, 1.0]\n assert duration == [0.5, 0.5, 0.5]\n assert value == ['A', 'A', 'E']\n assert confidence == [None, None, None]\n\n assert jam_2.annotations[0]['sandbox']['name'] == 'chords_2'\n\n time, duration, value, confidence = get_jam_data(jam_3, 'chord', 0)\n assert time == [0.0, 0.5, 1.0]\n assert duration == [0.5, 0.5, 0.5]\n assert value == ['A', 'A', 'E']\n assert confidence == [None, None, None]\n\n time, duration, value, confidence = get_jam_data(jam_3, 'chord', 1)\n assert time == [0.0, 0.7, 1.0]\n assert duration == [0.7, 0.3, 0.5]\n assert value == ['A', 'B', 'C']\n assert confidence == [None, None, None]\n\n time, duration, value, confidence = get_jam_data(jam_6, 'chord', 0)\n assert time == []\n assert duration == []\n assert value == []\n assert confidence == []\n\n assert type(jam_1) == jams.JAMS\n\n with pytest.raises(TypeError):\n jams_utils.jams_converter(chord_data=chord_data_4)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(chord_data=chord_data_5)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(chord_data=chord_data_7)\n\n\ndef test_notes():\n note_data_1 = [\n (\n utils.NoteData(\n np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array([1108.731, 1108.731, 1108.731]),\n np.array([1, 1, 1])\n ),\n None,\n )\n ]\n note_data_2 = [\n (\n utils.NoteData(\n np.array([[0.0, 0.8, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array([1108.731, 1108.731, 1108.731]),\n np.array([1, 1, 1])\n ),\n 'notes_2',\n )\n ]\n note_data_3 = [\n (\n utils.NoteData(\n np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array([1108.731, 1108.731, 1108.731]),\n np.array([1, 1, 1])\n ),\n 'notes_1',\n ),\n (\n utils.NoteData(\n np.array([[0.0, 0.7, 1.0], [0.7, 1.0, 1.5]]).T,\n np.array([1108.731, 1108.731, 1108.731]),\n np.array([1, 1, 1])\n ),\n 'notes_2',\n ),\n ]\n note_data_4 = (\n utils.NoteData(\n np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 1.5]]).T, \n np.array([1108.731, 1108.731, 1108.731]),\n np.array([1, 1, 1])\n ),\n None,\n )\n note_data_5 = [\n [\n utils.NoteData(\n np.array([[0.0, 0.5, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array([1108.731, 1108.731, 1108.731]),\n np.array([1, 1, 1])\n ),\n None,\n ],\n (\n utils.NoteData(\n np.array([[0.0, 0.8, 1.0], [0.5, 1.0, 1.5]]).T,\n np.array([1108.731, 1108.731, 1108.731]),\n np.array([1, 1, 1])\n ),\n 'notes_2',\n ),\n ]\n note_data_6 = [(None, None)]\n note_data_7 = [\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n )\n ]\n\n jam_1 = jams_utils.jams_converter(note_data=note_data_1)\n jam_2 = jams_utils.jams_converter(note_data=note_data_2)\n jam_3 = jams_utils.jams_converter(note_data=note_data_3)\n jam_6 = jams_utils.jams_converter(note_data=note_data_6)\n\n time, duration, value, confidence = get_jam_data(jam_1, 'note_hz', 0)\n assert time == [0.0, 0.5, 1.0]\n assert duration == [0.5, 0.5, 0.5]\n assert value == [1108.731, 1108.731, 1108.731]\n assert confidence == [None, None, None]\n\n assert 
jam_2.annotations[0]['sandbox']['name'] == 'notes_2'\n\n time, duration, value, confidence = get_jam_data(jam_3, 'note_hz', 0)\n assert time == [0.0, 0.5, 1.0]\n assert duration == [0.5, 0.5, 0.5]\n assert value == [1108.731, 1108.731, 1108.731]\n assert confidence == [None, None, None]\n\n time, duration, value, confidence = get_jam_data(jam_3, 'note_hz', 1)\n assert time == [0.0, 0.7, 1.0]\n assert duration == [0.7, 0.3, 0.5]\n assert value == [1108.731, 1108.731, 1108.731]\n assert confidence == [None, None, None]\n\n time, duration, value, confidence = get_jam_data(jam_6, 'note_hz', 0)\n assert time == []\n assert duration == []\n assert value == []\n assert confidence == []\n\n assert type(jam_1) == jams.JAMS\n\n with pytest.raises(TypeError):\n jams_utils.jams_converter(note_data=note_data_4)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(note_data=note_data_5)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(note_data=note_data_7)\n\n\ndef test_sections():\n section_data_1 = [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n None,\n )\n ]\n section_data_2 = [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n 'sections_2',\n )\n ]\n section_data_3 = [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n 'sections_1',\n ),\n (\n utils.SectionData(\n np.array([[0.0, 15.0, 20.0], [15.0, 20.0, 30.0]]).T,\n np.array(['verse A', 'verse B', 'verse C']),\n ),\n 'sections_2',\n ),\n ]\n section_data_4 = (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n None,\n )\n section_data_5 = [\n [\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n None,\n ],\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n 'sections_2',\n ),\n ]\n section_data_6 = [(None, None)]\n section_data_7 = [\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n )\n ]\n\n jam_1 = jams_utils.jams_converter(section_data=section_data_1)\n jam_2 = jams_utils.jams_converter(section_data=section_data_2)\n jam_3 = jams_utils.jams_converter(section_data=section_data_3)\n jam_6 = jams_utils.jams_converter(section_data=section_data_6)\n\n time, duration, value, confidence = get_jam_data(jam_1, 'segment', 0)\n assert time == [0.0, 10.0, 20.0]\n assert duration == [10.0, 10.0, 5.0]\n assert value == ['verse A', 'verse B', 'verse A']\n assert confidence == [None, None, None]\n\n assert jam_2.annotations[0]['sandbox']['name'] == 'sections_2'\n\n time, duration, value, confidence = get_jam_data(jam_3, 'segment', 0)\n assert time == [0.0, 10.0, 20.0]\n assert duration == [10.0, 10.0, 5.0]\n assert value == ['verse A', 'verse B', 'verse A']\n assert confidence == [None, None, None]\n\n time, duration, value, confidence = get_jam_data(jam_3, 'segment', 1)\n assert time == [0.0, 15.0, 20.0]\n assert duration == [15.0, 5.0, 10.0]\n assert value == ['verse A', 'verse B', 'verse C']\n assert confidence == [None, None, None]\n\n time, duration, value, confidence = get_jam_data(jam_6, 'segment', 0)\n assert time == []\n assert duration == []\n assert value == []\n assert 
confidence == []\n\n assert type(jam_1) == jams.JAMS\n\n with pytest.raises(TypeError):\n jams_utils.jams_converter(section_data=section_data_4)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(section_data=section_data_5)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(section_data=section_data_7)\n\n\ndef test_multi_sections():\n multi_section_data_1 = [\n (\n [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n None,\n ),\n (\n utils.SectionData(\n np.array([[0.0, 15.0, 20.0], [15.0, 20.0, 25.0]]).T,\n np.array(['verse a', 'verse b', 'verse a']),\n ),\n None,\n ),\n ],\n None,\n )\n ]\n\n multi_section_data_2 = [\n (\n [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n 0,\n ),\n (\n utils.SectionData(\n np.array([[0.0, 15.0, 20.0], [15.0, 20.0, 25.0]]).T,\n np.array(['verse a', 'verse b', 'verse a']),\n ),\n 1,\n ),\n ],\n 'annotator_1',\n )\n ]\n multi_section_data_3 = [\n (\n [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n 0,\n ),\n (\n utils.SectionData(\n np.array([[0.0, 15.0, 20.0], [15.0, 20.0, 25.0]]).T,\n np.array(['verse a', 'verse b', 'verse a']),\n ),\n 1,\n ),\n ],\n 'annotator_1',\n ),\n (\n [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n 0,\n ),\n (\n utils.SectionData(\n np.array([[0.0, 15.0, 20.0], [15.0, 20.0, 25.0]]).T,\n np.array(['verse a', 'verse b', 'verse a']),\n ),\n 1,\n ),\n ],\n 'annotator_2',\n ),\n ]\n multi_section_data_4 = (\n [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n None,\n ),\n (\n utils.SectionData(\n np.array([[0.0, 15.0, 20.0], [15.0, 20.0, 25.0]]).T,\n np.array(['verse a', 'verse b', 'verse a']),\n ),\n None,\n ),\n ],\n None,\n )\n multi_section_data_5 = [\n [\n [\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n None,\n ),\n (\n utils.SectionData(\n np.array([[0.0, 15.0, 20.0], [15.0, 20.0, 25.0]]).T,\n np.array(['verse a', 'verse b', 'verse a']),\n ),\n None,\n ),\n ],\n None,\n ]\n ]\n multi_section_data_6 = [\n (\n (\n (\n utils.SectionData(\n np.array([[0.0, 10.0, 20.0], [10.0, 20.0, 25.0]]).T,\n np.array(['verse A', 'verse B', 'verse A']),\n ),\n None,\n ),\n (\n utils.SectionData(\n np.array([[0.0, 15.0, 20.0], [15.0, 20.0, 25.0]]).T,\n np.array(['verse a', 'verse b', 'verse a']),\n ),\n None,\n ),\n ),\n None,\n )\n ]\n multi_section_data_7 = [([(None, None), (None, None)], None)]\n multi_section_data_8 = [\n (\n [\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n ),\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n ),\n ],\n None,\n )\n ]\n\n jam_1 = jams_utils.jams_converter(multi_section_data=multi_section_data_1)\n jam_2 = jams_utils.jams_converter(multi_section_data=multi_section_data_2)\n jam_3 = jams_utils.jams_converter(multi_section_data=multi_section_data_3)\n jam_7 = jams_utils.jams_converter(multi_section_data=multi_section_data_7)\n\n time, duration, value, confidence = get_jam_data(jam_1, 'multi_segment', 0)\n assert time == [0.0, 0.0, 10.0, 15.0, 
20.0, 20.0]\n assert duration == [10.0, 15.0, 10.0, 5.0, 5.0, 5.0]\n assert value == [\n {'label': 'verse A', 'level': None},\n {'label': 'verse a', 'level': None},\n {'label': 'verse B', 'level': None},\n {'label': 'verse b', 'level': None},\n {'label': 'verse A', 'level': None},\n {'label': 'verse a', 'level': None},\n ]\n assert confidence == [None, None, None, None, None, None]\n\n assert (\n jam_2.annotations[0]['annotation_metadata']['annotator']['name']\n == 'annotator_1'\n )\n\n time, duration, value, confidence = get_jam_data(jam_3, 'multi_segment', 0)\n assert time == [0.0, 0.0, 10.0, 15.0, 20.0, 20.0]\n assert duration == [10.0, 15.0, 10.0, 5.0, 5.0, 5.0]\n assert value == [\n {'label': 'verse A', 'level': 0},\n {'label': 'verse a', 'level': 1},\n {'label': 'verse B', 'level': 0},\n {'label': 'verse b', 'level': 1},\n {'label': 'verse A', 'level': 0},\n {'label': 'verse a', 'level': 1},\n ]\n assert confidence == [None, None, None, None, None, None]\n\n time, duration, value, confidence = get_jam_data(jam_3, 'multi_segment', 1)\n assert time == [0.0, 0.0, 10.0, 15.0, 20.0, 20.0]\n assert duration == [10.0, 15.0, 10.0, 5.0, 5.0, 5.0]\n assert value == [\n {'label': 'verse A', 'level': 0},\n {'label': 'verse a', 'level': 1},\n {'label': 'verse B', 'level': 0},\n {'label': 'verse b', 'level': 1},\n {'label': 'verse A', 'level': 0},\n {'label': 'verse a', 'level': 1},\n ]\n assert confidence == [None, None, None, None, None, None]\n\n time, duration, value, confidence = get_jam_data(jam_7, 'multi_segment', 0)\n assert time == []\n assert duration == []\n assert value == []\n assert confidence == []\n\n assert type(jam_1) == jams.JAMS\n\n with pytest.raises(TypeError):\n jams_utils.jams_converter(multi_section_data=multi_section_data_4)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(multi_section_data=multi_section_data_5)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(multi_section_data=multi_section_data_6)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(multi_section_data=multi_section_data_8)\n\n\ndef test_keys():\n key_data_1 = [\n (utils.KeyData(np.array([0.0]), np.array([100.0]), np.array(['A'])), None)\n ]\n key_data_2 = [\n (utils.KeyData(np.array([0.0]), np.array([100.0]), np.array(['A'])), 'keys_1')\n ]\n key_data_3 = [\n (utils.KeyData(np.array([0.0]), np.array([100.0]), np.array(['A'])), 'keys_1'),\n (utils.KeyData(np.array([0.0]), np.array([50.0]), np.array(['B'])), 'keys_2'),\n ]\n key_data_4 = (\n utils.KeyData(np.array([0.0]), np.array([100.0]), np.array(['A'])),\n 'keys_1',\n )\n key_data_5 = [\n [utils.KeyData(np.array([0.0]), np.array([100.0]), np.array(['A'])), 'keys_1'],\n (utils.KeyData(np.array([0.0]), np.array([50.0]), np.array(['B'])), 'keys_2'),\n ]\n key_data_6 = [(None, None)]\n key_data_7 = [\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n )\n ]\n\n jam_1 = jams_utils.jams_converter(key_data=key_data_1)\n jam_2 = jams_utils.jams_converter(key_data=key_data_2)\n jam_3 = jams_utils.jams_converter(key_data=key_data_3)\n jam_6 = jams_utils.jams_converter(key_data=key_data_6)\n\n time, duration, value, confidence = get_jam_data(jam_1, 'key', 0)\n assert time == [0.0]\n assert duration == [100.0]\n assert value == ['A']\n assert confidence == [None]\n\n assert jam_2.annotations[0]['sandbox']['name'] == 'keys_1'\n\n time, duration, value, confidence = get_jam_data(jam_3, 'key', 0)\n assert time == [0.0]\n assert duration == [100.0]\n assert value 
== ['A']\n assert confidence == [None]\n\n time, duration, value, confidence = get_jam_data(jam_3, 'key', 1)\n assert time == [0.0]\n assert duration == [50.0]\n assert value == ['B']\n assert confidence == [None]\n\n time, duration, value, confidence = get_jam_data(jam_6, 'key', 0)\n assert time == []\n assert duration == []\n assert value == []\n assert confidence == []\n\n assert type(jam_1) == jams.JAMS\n\n with pytest.raises(TypeError):\n jams_utils.jams_converter(key_data=key_data_4)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(key_data=key_data_5)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(key_data=key_data_7)\n\n\ndef test_f0s():\n f0_data_1 = [\n (\n utils.F0Data(\n np.array([0.016, 0.048]), np.array([0.0, 260.9]), np.array([0.0, 1.0])\n ),\n None,\n )\n ]\n f0_data_2 = [\n (\n utils.F0Data(\n np.array([0.016, 0.048]), np.array([0.0, 260.9]), np.array([0.0, 1.0])\n ),\n 'f0s_1',\n )\n ]\n f0_data_3 = [\n (\n utils.F0Data(\n np.array([0.016, 0.048]), np.array([0.0, 260.9]), np.array([0.0, 1.0])\n ),\n 'f0s_1',\n ),\n (\n utils.F0Data(\n np.array([0.003, 0.012]), np.array([0.0, 230.5]), np.array([0.0, 1.0])\n ),\n 'f0s_2',\n ),\n ]\n f0_data_4 = (\n utils.F0Data(\n np.array([0.016, 0.048]), np.array([0.0, 260.9]), np.array([0.0, 1.0])\n ),\n 'f0s_1',\n )\n f0_data_5 = [\n [\n utils.F0Data(\n np.array([0.016, 0.048]), np.array([0.0, 260.9]), np.array([0.0, 1.0])\n ),\n 'f0s_1',\n ],\n (\n utils.F0Data(\n np.array([0.003, 0.012]), np.array([0.0, 230.5]), np.array([0.0, 1.0])\n ),\n 'f0s_2',\n ),\n ]\n f0_data_6 = [(None, None)]\n f0_data_7 = [\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n )\n ]\n\n jam_1 = jams_utils.jams_converter(f0_data=f0_data_1)\n jam_2 = jams_utils.jams_converter(f0_data=f0_data_2)\n jam_3 = jams_utils.jams_converter(f0_data=f0_data_3)\n jam_6 = jams_utils.jams_converter(f0_data=f0_data_6)\n\n time, duration, value, confidence = get_jam_data(jam_1, 'pitch_contour', 0)\n assert time == [0.016, 0.048]\n assert duration == [0.0, 0.0]\n assert value == [0.0, 260.9]\n assert confidence == [0.0, 1.0]\n\n assert jam_2.annotations[0]['sandbox']['name'] == 'f0s_1'\n\n time, duration, value, confidence = get_jam_data(jam_3, 'pitch_contour', 0)\n assert time == [0.016, 0.048]\n assert duration == [0.0, 0.0]\n assert value == [0.0, 260.9]\n assert confidence == [0.0, 1.0]\n\n time, duration, value, confidence = get_jam_data(jam_3, 'pitch_contour', 1)\n assert time == [0.003, 0.012]\n assert duration == [0.0, 0.0]\n assert value == [0.0, 230.5]\n assert confidence == [0.0, 1.0]\n\n time, duration, value, confidence = get_jam_data(jam_6, 'pitch_contour', 0)\n assert time == []\n assert duration == []\n assert value == []\n assert confidence == []\n\n assert type(jam_1) == jams.JAMS\n\n with pytest.raises(TypeError):\n jams_utils.jams_converter(f0_data=f0_data_4)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(f0_data=f0_data_5)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(f0_data=f0_data_7)\n\n\ndef test_lyrics():\n lyrics_data_1 = [\n (\n utils.LyricData(\n np.array([0.027, 0.232]),\n np.array([0.227, 0.742]),\n np.array(['The', 'Test']),\n np.array([None, None]),\n ),\n None,\n )\n ]\n lyrics_data_2 = [\n (\n utils.LyricData(\n np.array([0.027, 0.232]),\n np.array([0.227, 0.742]),\n np.array(['The', 'Test']),\n np.array([None, None]),\n ),\n 'lyrics_1',\n )\n ]\n lyrics_data_3 = [\n (\n utils.LyricData(\n np.array([0.027, 0.232]),\n 
np.array([0.227, 0.742]),\n np.array(['The', 'Test']),\n np.array([None, None]),\n ),\n 'lyrics_1',\n ),\n (\n utils.LyricData(\n np.array([0.0, 0.232]),\n np.array([0.227, 0.742]),\n np.array(['is', 'cool']),\n np.array([None, None]),\n ),\n 'lyrics_2',\n ),\n ]\n lyrics_data_4 = (\n utils.LyricData(\n np.array([0.027, 0.232]),\n np.array([0.227, 0.742]),\n np.array(['The', 'Test']),\n np.array([None, None]),\n ),\n 'lyrics_1',\n )\n lyrics_data_5 = [\n (\n utils.LyricData(\n np.array([0.027, 0.232]),\n np.array([0.227, 0.742]),\n np.array(['The', 'Test']),\n np.array([None, None]),\n ),\n 'lyrics_1',\n ),\n [\n utils.LyricData(\n np.array([0.0, 0.232]),\n np.array([0.227, 0.742]),\n np.array(['is', 'cool']),\n np.array([None, None]),\n ),\n 'lyrics_2',\n ],\n ]\n lyrics_data_6 = [(None, None)]\n lyrics_data_7 = [\n (\n utils.EventData(\n np.array([0.2, 0.3]),\n np.array([0.3, 0.4]),\n np.array(['event A', 'event B']),\n ),\n None,\n )\n ]\n\n jam_1 = jams_utils.jams_converter(lyrics_data=lyrics_data_1)\n jam_2 = jams_utils.jams_converter(lyrics_data=lyrics_data_2)\n jam_3 = jams_utils.jams_converter(lyrics_data=lyrics_data_3)\n jam_6 = jams_utils.jams_converter(lyrics_data=lyrics_data_6)\n\n time, duration, value, confidence = get_jam_data(jam_1, 'lyrics', 0)\n assert time == [0.027, 0.232]\n assert duration == [0.2, 0.51]\n assert value == ['The', 'Test']\n assert confidence == [None, None]\n\n assert jam_2.annotations[0]['sandbox']['name'] == 'lyrics_1'\n\n time, duration, value, confidence = get_jam_data(jam_3, 'lyrics', 0)\n assert time == [0.027, 0.232]\n assert duration == [0.2, 0.51]\n assert value == ['The', 'Test']\n assert confidence == [None, None]\n\n time, duration, value, confidence = get_jam_data(jam_3, 'lyrics', 1)\n assert time == [0.0, 0.232]\n assert duration == [0.227, 0.51]\n assert value == ['is', 'cool']\n assert confidence == [None, None]\n\n time, duration, value, confidence = get_jam_data(jam_6, 'lyrics', 0)\n assert time == []\n assert duration == []\n assert value == []\n assert confidence == []\n\n assert type(jam_1) == jams.JAMS\n\n with pytest.raises(TypeError):\n jams_utils.jams_converter(lyrics_data=lyrics_data_4)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(lyrics_data=lyrics_data_5)\n with pytest.raises(TypeError):\n jams_utils.jams_converter(lyrics_data=lyrics_data_7)\n\n\ndef test_metadata():\n metadata_1 = {\n 'duration': 1.5,\n 'artist': 'Meatloaf',\n 'title': 'Le ciel est blue',\n 'favourite_color': 'rainbow',\n }\n\n jam_1 = jams_utils.jams_converter(lyrics_data=[(None, None)], metadata=metadata_1)\n\n assert jam_1['file_metadata']['title'] == 'Le ciel est blue'\n assert jam_1['file_metadata']['artist'] == 'Meatloaf'\n assert jam_1['file_metadata']['duration'] == 1.5\n assert jam_1['sandbox']['favourite_color'] == 'rainbow'\n"
] |
[
[
"numpy.array"
]
] |
horoiwa/VisionML
|
[
"f1507b3acbb696a39d46037c1653fe2ca5702a0b"
] |
[
"absoluteCLF/valid.py"
] |
[
"import os\nimport glob\nimport pandas as pd\nimport pathlib\nimport shutil\nimport sys\n\nfrom PIL import Image\nimport numpy as np\nfrom skimage.segmentation import mark_boundaries\nimport matplotlib.pyplot as plt\nimport lime\nfrom lime import lime_image\n\nfrom src.models import load_model\nfrom src.util import get_latestname\nfrom config import TARGET_SIZE, BASEMODEL\nNUM_SAMPLES = 100\n\n\ndef main(mode='simple'):\n homedir = '__checkpoints__/valid'\n if os.path.exists(homedir):\n shutil.rmtree(homedir)\n os.makedirs(homedir)\n\n categories = os.listdir('images/test')\n indices = range(len(categories))\n labels = {idx: label for idx, label in zip(indices, categories)}\n\n for category in categories:\n category_dir = os.path.join(homedir, category)\n os.makedirs(category_dir)\n debug_model(category, category_dir, labels, mode)\n\n\ndef debug_model(category, category_dir, labels, mode):\n print(\"Start:\", category)\n images_path = glob.glob(os.path.join('images', 'test', category, '*.jpg'))\n\n n_classes = os.listdir('images/train')\n trained_weight = get_latestname(\"__checkpoints__/model_\", 1)\n model = load_model(len(n_classes), trained_weight,\n freeze='inference', basemodel=BASEMODEL)\n\n for image_path in images_path:\n image = prep_image(image_path)\n name = pathlib.Path(image_path).name\n\n prediction = model.predict(image)\n predicted_label = labels[np.argmax(prediction)]\n true_label = category\n\n if predicted_label == true_label:\n continue\n\n image = image.reshape(TARGET_SIZE[0], TARGET_SIZE[1], 3)\n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(image, model.predict,\n top_labels=len(n_classes),\n hide_color=0,\n num_samples=NUM_SAMPLES)\n\n if mode == 'RedGreen':\n temp, mask = explanation.get_image_and_mask(\n explanation.top_labels[0],\n positive_only=False,\n num_features=10,\n hide_rest=False)\n\n elif mode == 'simple':\n temp, mask = explanation.get_image_and_mask(\n explanation.top_labels[0],\n positive_only=True,\n num_features=5,\n hide_rest=False)\n\n image = mark_boundaries(temp * 0.7 + 0.3, mask) * 255\n image = Image.fromarray(np.uint8(image))\n image.save(os.path.join(category_dir, \"[\"+predicted_label+\"]_\"+name))\n\n\ndef prep_image(image_path):\n image = Image.open(image_path)\n image = image.resize(TARGET_SIZE, Image.LANCZOS)\n image = np.array(image) / 255\n image = image.reshape(TARGET_SIZE[0],\n TARGET_SIZE[1], 3).reshape(1,\n TARGET_SIZE[0],\n TARGET_SIZE[1], 3)\n\n return image\n\n\nif __name__ == '__main__':\n try:\n mode = sys.argv[1]\n except IndexError:\n mode = 'RedGreen'\n\n main(mode)\n"
] |
[
[
"numpy.uint8",
"numpy.array",
"numpy.argmax"
]
] |
SSantosLab/notMOSFiT
|
[
"58cc4a271ef1f1bbf8ae3eb2fc847d2d82c103f0"
] |
[
"mosfit/modules/seds/multiblackbody.py"
] |
[
"\"\"\"Definitions for the `MultiBlackbody` class.\"\"\"\nfrom math import pi\n\nimport numexpr as ne\nimport numpy as np\nfrom astropy import constants as c\nfrom mosfit.constants import DAY_CGS, FOUR_PI, KM_CGS, M_SUN_CGS # noqa: F401\nfrom mosfit.modules.seds.sed import SED\n\n\n# Important: Only define one ``Module`` class per file.\n\n\nclass MultiBlackbody(SED):\n \"\"\"Generalized multiple blackbody spectral energy distribution.\"\"\"\n\n FLUX_CONST = FOUR_PI * (2.0 * c.h / (c.c ** 2) * pi).cgs.value\n X_CONST = (c.h / c.k_B).cgs.value\n STEF_CONST = (4.0 * pi * c.sigma_sb).cgs.value\n\n def process(self, **kwargs):\n \"\"\"Process module.\"\"\"\n raise RuntimeError('`MultiBlackbody` is not yet functional.')\n kwargs = self.prepare_input(self.key('luminosities'), **kwargs)\n self._luminosities = kwargs[self.key('luminosities')]\n self._bands = kwargs['all_bands']\n self._band_indices = kwargs['all_band_indices']\n self._areas = kwargs[self.key('areas')]\n self._temperature_phots = kwargs[self.key('temperaturephots')]\n xc = self.X_CONST # noqa: F841\n fc = self.FLUX_CONST # noqa: F841\n temperature_phot = self._temperature_phot\n zp1 = 1.0 + kwargs[self.key('redshift')]\n seds = []\n for li, lum in enumerate(self._luminosities):\n cur_band = self._bands[li] # noqa: F841\n bi = self._band_indices[li]\n rest_freqs = [x * zp1 # noqa: F841\n for x in self._sample_frequencies[bi]]\n wav_arr = np.array(self._sample_wavelengths[bi]) # noqa: F841\n radius_phot = self._radius_phot[li] # noqa: F841\n temperature_phot = self._temperature_phot[li] # noqa: F841\n\n if li == 0:\n sed = ne.evaluate(\n 'fc * radius_phot**2 * rest_freqs**3 / '\n '(exp(xc * rest_freqs / temperature_phot) - 1.0)')\n else:\n sed = ne.re_evaluate()\n\n sed = np.nan_to_num(sed)\n\n seds.append(list(sed))\n\n seds = self.add_to_existing_seds(seds, **kwargs)\n\n return {'sample_wavelengths': self._sample_wavelengths,\n self.key('seds'): seds}\n"
] |
[
[
"numpy.array",
"numpy.nan_to_num"
]
] |
nuannuanhcc/DMRNet
|
[
"e6e2315219aac9146f55b37481d0c287f624dd7a"
] |
[
"mmdetection/mmdet/models/detectors/double_head_rcnn.py"
] |
[
"import torch\n\nfrom mmdet.core import bbox2roi, build_assigner, build_sampler\nfrom ..registry import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\[email protected]_module\nclass DoubleHeadRCNN(TwoStageDetector):\n\n def __init__(self, reg_roi_scale_factor, **kwargs):\n super().__init__(**kwargs)\n self.reg_roi_scale_factor = reg_roi_scale_factor\n\n def forward_train(self,\n img,\n img_meta,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None):\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,\n self.train_cfg.rpn)\n rpn_losses = self.rpn_head.loss(\n *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n losses.update(rpn_losses)\n\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n proposal_inputs = rpn_outs + (img_meta, proposal_cfg)\n proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)\n else:\n proposal_list = proposals\n\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)\n bbox_sampler = build_sampler(\n self.train_cfg.rcnn.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = bbox_assigner.assign(proposal_list[i],\n gt_bboxes[i],\n gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n if self.with_bbox:\n rois = bbox2roi([res.bboxes for res in sampling_results])\n # TODO: a more flexible way to decide which feature maps to use\n bbox_cls_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n bbox_reg_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs],\n rois,\n roi_scale_factor=self.reg_roi_scale_factor)\n if self.with_shared_head:\n bbox_cls_feats = self.shared_head(bbox_cls_feats)\n bbox_reg_feats = self.shared_head(bbox_reg_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_cls_feats,\n bbox_reg_feats)\n\n bbox_targets = self.bbox_head.get_target(sampling_results,\n gt_bboxes, gt_labels,\n self.train_cfg.rcnn)\n loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,\n *bbox_targets)\n losses.update(loss_bbox)\n\n # mask head forward and loss\n if self.with_mask:\n if not self.share_roi_extractor:\n pos_rois = bbox2roi(\n [res.pos_bboxes for res in sampling_results])\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], pos_rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n pos_inds = []\n device = bbox_cls_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n mask_feats = bbox_cls_feats[pos_inds]\n mask_pred = self.mask_head(mask_feats)\n\n mask_targets = self.mask_head.get_target(sampling_results,\n gt_masks,\n self.train_cfg.rcnn)\n pos_labels = torch.cat(\n [res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_pred, mask_targets,\n pos_labels)\n 
losses.update(loss_mask)\n\n return losses\n\n def simple_test_bboxes(self,\n x,\n img_meta,\n proposals,\n rcnn_test_cfg,\n rescale=False):\n \"\"\"Test only det bboxes without augmentation.\"\"\"\n rois = bbox2roi(proposals)\n bbox_cls_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n bbox_reg_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs],\n rois,\n roi_scale_factor=self.reg_roi_scale_factor)\n if self.with_shared_head:\n bbox_cls_feats = self.shared_head(bbox_cls_feats)\n bbox_reg_feats = self.shared_head(bbox_reg_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n det_bboxes, det_labels = self.bbox_head.get_det_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n return det_bboxes, det_labels\n"
] |
[
[
"torch.zeros",
"torch.ones",
"torch.cat"
]
] |
sakamotosan/pipeline_grid_search
|
[
"bcae3e8320f4fbe6cd016c4899a2418cac077abc"
] |
[
"tests/test_pipeline_grid_search.py"
] |
[
"\"\"\"\nTesting for pipeline_grid_search module.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport time\n\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.datasets import make_classification\nfrom sklearn.decomposition import PCA\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.svm import SVC\n\nfrom nose.tools import assert_equal\n\nfrom pipeline_grid_search import PipelineGridSearchCV\n\n# Globals for counting estimator calls\nn_transform_calls = 0\nn_fit_calls = 0\n\n# http://stackoverflow.com/a/27005560/4963543\ndef make_init_body(classname,parameters):\n # Calling super does not work for some reason,\n # but it does not matter in this case, since\n # BaseEstimator, TransformerMixin and ClassifierMixin have an empty __init__ function.\n #body = \" super({}, self).__init__()\".format(classname)\n body = \" pass\"\n body += ''.join('\\n self.{}={}'.format(key,key) for key,_ in parameters)\n func_str = \" def __init__(self{}):\\n{}\".format(''.join(', {}={}'.format(key,val) for key,val in parameters), body)\n return func_str\n\ndef create_mock_estimator(classname,parameters,is_classifier=False):\n # parameters is a list of (key,val) pairs.\n\n init_body = make_init_body(classname,parameters)\n\n main_body = \"\"\"\n def fit(self, X, y=None):\n global n_fit_calls\n n_fit_calls += 1\n return self\n \"\"\"\n if is_classifier:\n bases = \"(BaseEstimator, TransformerMixin, ClassifierMixin)\"\n main_body += \"\"\"\n def predict(self, X):\n return np.arange(X.shape[0])\n \"\"\"\n else:\n bases = \"(BaseEstimator, TransformerMixin)\"\n main_body += \"\"\"\n def transform(self, X):\n global n_transform_calls\n n_transform_calls += 1\n odd = False\n for k,v in self.get_params().items():\n if odd:\n X = X*v\n else:\n X = X-v\n odd = not odd\n return X\n \"\"\"\n body = \"class {}{}:\\n{}\\n{}\".format(classname,bases,init_body,main_body)\n\n print(body)\n exec(body)\n\n newclassobj = locals()[classname]()\n return newclassobj\n\ndef create_mock_classifier(classname,parameters):\n return create_mock_estimator(classname,parameters,is_classifier=True)\n\ndef nfits(nparams):\n # calcs the number of optimal calls to fit when following DFS order\n # (the number of nodes in the pipeline tree minus one)\n s = 1\n for c in reversed(nparams):\n s = 1+c*s if c>1 else s+1\n return s-1\n\ndef calc_n_ideal_fit_calls(parts, cv_params, n_folds):\n pipe_length = len(parts)\n nparams = []\n for p in parts:\n param_count = 1\n for (name,vals) in cv_params:\n est_name,_ = name.split(\"__\",1)\n if est_name == p.__class__.__name__:\n param_count *= len(vals)\n nparams.append(param_count)\n print(nparams)\n \n n_ideal_calls = nfits(nparams)\n n_ideal_calls *= n_folds # We repeat the above number of fit calls for each fold \n n_ideal_calls += pipe_length # plus the fits for fitting on the whole X last\n\n return n_ideal_calls\n\ndef calc_n_ideal_transform_calls(parts, cv_params, n_folds):\n pipe_length = len(parts)\n nparams = []\n for p in parts[:-1]: # Do not include the last part of the pipeline; it is a classifier (without transform)\n param_count = 1\n for (name,vals) in cv_params:\n est_name,_ = name.split(\"__\",1)\n if est_name == p.__class__.__name__:\n param_count *= len(vals)\n nparams.append(param_count)\n \n n_ideal_calls = nfits(nparams)\n 
n_ideal_calls *= n_folds*2 # We repeat the above number of fit calls for each fold (and for both the train and development set)\n n_ideal_calls += pipe_length-1 # plus the fits for fitting on the whole X last (minus the classifier at the end)\n\n return n_ideal_calls\n\ndef test_pipeline_grid_search1():\n # The that the number of estimator calls is less than the ones for regular GridSearchCV\n parts = [\n create_mock_estimator(\"f0\",[]),\n create_mock_estimator(\"f1\", [(\"p1\",0),(\"p2\",2)]),\n create_mock_estimator(\"f2\",[]),\n create_mock_estimator(\"f3\",[(\"c\",0),(\"d\",0)]),\n create_mock_estimator(\"f4\",[]),\n create_mock_estimator(\"f5\",[]),\n create_mock_classifier(\"f6\",[(\"c\",0)]),\n ]\n\n cv_params = [\n ('f1__p1', [10,20]),\n ('f3__c', [10,20,30]),\n ('f3__d', [10,20,30,40]),\n ('f6__c', [10,20,30,40]),\n ]\n\n perform_pipeline_case(parts, cv_params)\n\ndef test_pipeline_grid_search2():\n # The that the number of estimator calls is less than the ones for regular GridSearchCV\n parts = [\n create_mock_estimator(\"f0\",[]),\n create_mock_estimator(\"f1\", [(\"p1\",0),(\"p2\",2)]),\n create_mock_estimator(\"f2\",[]),\n create_mock_estimator(\"f3\",[(\"c\",0),(\"d\",0)]),\n create_mock_estimator(\"f4\",[]),\n create_mock_estimator(\"f5\",[]),\n create_mock_estimator(\"f40\",[]),\n create_mock_estimator(\"f50\",[]),\n create_mock_estimator(\"f41\",[]),\n create_mock_estimator(\"f51\",[]),\n create_mock_estimator(\"f42\",[]),\n create_mock_estimator(\"f52\",[]),\n create_mock_classifier(\"f6\",[(\"c\",0)]),\n ]\n\n cv_params = [\n ('f1__p1', [10,20]),\n ('f3__c', [10,20,30]),\n ('f3__d', [10,20,30,40]),\n ('f6__c', [10,20,30,40]),\n ]\n\n perform_pipeline_case(parts, cv_params)\n\ndef test_pipeline_grid_search3():\n # The that the number of estimator calls is less than the ones for regular GridSearchCV\n parts = [\n create_mock_classifier(\"f1\", [(\"p1\",0)]),\n ]\n\n cv_params = [\n ('f1__p1', [10,20]),\n ]\n\n perform_pipeline_case(parts, cv_params)\n\ndef test_pipeline_grid_search4():\n # The that the number of estimator calls is less than the ones for regular GridSearchCV\n parts = [\n create_mock_classifier(\"f1\", []),\n ]\n\n cv_params = [\n ]\n\n perform_pipeline_case(parts, cv_params)\n\ndef test_pipeline_grid_search5():\n # The that the number of estimator calls is less than the ones for regular GridSearchCV\n parts = [\n create_mock_estimator(\"f0\",[]),\n create_mock_estimator(\"f1\", [(\"p1\",0),(\"p2\",2)]),\n create_mock_estimator(\"f2\",[]),\n create_mock_estimator(\"f3\",[(\"c\",0),(\"d\",0)]),\n create_mock_estimator(\"f4\",[]),\n create_mock_estimator(\"f5\",[]),\n create_mock_estimator(\"f6\",[]),\n create_mock_estimator(\"f7\",[]),\n create_mock_estimator(\"f8\",[]),\n create_mock_estimator(\"f9\",[]),\n create_mock_estimator(\"f10\",[]),\n create_mock_classifier(\"f11\",[]),\n ]\n\n cv_params = [\n ('f1__p1', [10,20]),\n ('f3__c', [10,20,30]),\n ('f3__d', [10,20,30,40]),\n ]\n\n perform_pipeline_case(parts, cv_params)\n\ndef test_pipeline_grid_search6():\n # Test that the number of estimator calls is less than the ones for regular GridSearchCV\n parts = [\n create_mock_estimator(\"f0\",[]),\n create_mock_estimator(\"f1\", [(\"p1\",0),(\"p2\",2)]),\n create_mock_estimator(\"f2\",[]),\n create_mock_estimator(\"f3\",[(\"c\",0),(\"d\",0)]),\n create_mock_estimator(\"f4\",[]),\n create_mock_estimator(\"f5\",[]),\n SVC() \n ]\n\n cv_params = [\n ('f1__p1', [10,20]),\n ('f3__c', [10,20,30]),\n ('f3__d', [10,20,30,40]),\n ('SVC__C', [1.,10.,100.,1000.]),\n 
('SVC__kernel', ['linear']),\n ]\n\n # Set assert_n_calls_equal to False, as we need to implement our custom counting of function calls in order to measure the call tests.\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False)\n\ndef test_pipeline_grid_search7():\n # Test that _DFSGridSearchCVPipeline gives the same selected parameters as the normal GridSearchCV\n parts = [\n PCA(),\n Normalizer(),\n SVC()\n ]\n\n cv_params = [\n ('PCA__n_components', [3,5,7]),\n ('Normalizer__norm', ['l2']),\n ('SVC__C', [1.,10.,100.,1000.]),\n ('SVC__kernel', ['linear']),\n ]\n\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False)\n\ndef test_pipeline_grid_search8():\n # Test using a FeatureUnion with embedded Pipelines.\n parts = [\n create_mock_estimator(\"f0\",[]),\n FeatureUnion([\n ('feat1', Pipeline([\n ('f11', create_mock_estimator(\"f11\", [(\"p1\",0),(\"p2\",2)])),\n ])),\n ('feat2', Pipeline([\n ('f12', create_mock_estimator(\"f12\", [(\"a\",0)])),\n ])),\n ]),\n create_mock_estimator(\"f1\", [(\"p1\",0),(\"p2\",2)]),\n create_mock_estimator(\"f2\",[]),\n create_mock_estimator(\"f3\",[(\"c\",0),(\"d\",0)]),\n create_mock_estimator(\"f4\",[]),\n create_mock_estimator(\"f5\",[]),\n create_mock_classifier(\"f11\",[]),\n ]\n\n cv_params = [\n ('FeatureUnion__feat1__f11__p1', [10,20]),\n ('FeatureUnion__feat2__f12__a', [10,20,30]),\n ('f1__p1', [10,20]),\n ('f3__c', [10,20,30]),\n ('f3__d', [10,20,30,40]),\n ]\n\n # Set assert_n_calls_equal to False, as we need to implement our custom counting of function calls in order to measure the call tests.\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False)\n # TODO: Update assert_n_calls_equal logic to work correctly with pipelines embedded in FeatureUnions.\n\ndef test_pipeline_grid_search9():\n # Test using a FeatureUnion with embedded Pipelines.\n parts = [\n create_mock_estimator(\"f0\",[]),\n FeatureUnion([\n ('feat1', Pipeline([\n ('f11', create_mock_estimator(\"f11\", [(\"p1\",0),(\"p2\",2)])),\n ('f111', create_mock_estimator(\"f111\", [(\"p1\",0),(\"p2\",2)])),\n ('f112', create_mock_estimator(\"f112\", [(\"p1\",0),(\"p2\",2)])),\n ])),\n ('feat2', Pipeline([\n ('f12', create_mock_estimator(\"f12\", [(\"a\",0)])),\n ('f121', create_mock_estimator(\"f121\", [(\"a\",0)])),\n ('f122', create_mock_estimator(\"f122\", [(\"a\",0)])),\n ])),\n ]),\n create_mock_estimator(\"f1\", [(\"p1\",0),(\"p2\",2)]),\n create_mock_estimator(\"f2\",[]),\n create_mock_estimator(\"f3\",[(\"c\",0),(\"d\",0)]),\n create_mock_estimator(\"f4\",[]),\n create_mock_estimator(\"f5\",[]),\n create_mock_classifier(\"f11\",[]),\n ]\n\n cv_params = [\n ('FeatureUnion__feat1__f11__p1', [10,20]),\n #('FeatureUnion__feat1__f111__p1', [10,20]),\n ('FeatureUnion__feat1__f112__p1', [10,20]),\n #('FeatureUnion__feat2__f12__a', [10,20,30]),\n #('FeatureUnion__feat2__f121__a', [10,20,30]),\n ('FeatureUnion__feat2__f122__a', [10,20,30]),\n ('f1__p1', [10,20]),\n ('f3__c', [10,20,30]),\n ('f3__d', [10,20,30,40]),\n ]\n\n # Set assert_n_calls_equal to False, as we need to implement our custom counting of function calls in order to measure the call tests.\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')\n\ndef test_pipeline_grid_search10():\n # Test if _DFSGridSearchCVPipeline works with submerged pipelines.\n parts = [\n create_mock_estimator(\"f0\",[]),\n FeatureUnion([\n ('feat1', Pipeline([\n ('f11', create_mock_estimator(\"f11\", [(\"p1\",0),(\"p2\",2)])),\n 
('f111', create_mock_estimator(\"f111\", [(\"p1\",0),(\"p2\",2)])),\n ('f112', create_mock_estimator(\"f112\", [(\"p1\",0),(\"p2\",2)])),\n ])),\n ('feat2', Pipeline([\n ('f12', create_mock_estimator(\"f12\", [(\"a\",0)])),\n ('f121', create_mock_estimator(\"f121\", [(\"a\",0)])),\n ('f122', create_mock_estimator(\"f122\", [(\"a\",0)])),\n ])),\n ]),\n PCA(),\n Normalizer(),\n SVC(),\n ]\n\n cv_params = [\n ('FeatureUnion__feat1__f11__p1', [10,20]),\n #('FeatureUnion__feat1__f111__p1', [10,20]),\n ('FeatureUnion__feat1__f112__p1', [10,20]),\n #('FeatureUnion__feat2__f12__a', [10,20,30]),\n #('FeatureUnion__feat2__f121__a', [10,20,30]),\n ('FeatureUnion__feat2__f122__a', [10,20,30]),\n ('PCA__n_components', [3,5,7]),\n ('Normalizer__norm', ['l2']),\n ('SVC__C', [1.,10.,100.,1000.]),\n ('SVC__kernel', ['linear']),\n ]\n\n # Set assert_n_calls_equal to False, as we need to implement our custom counting of function calls in order to measure the call tests.\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='dfs', cachedir='file_cache', datasetname='make_class')\n\ndef test_pipeline_grid_search11():\n # Test if _CacheGridSearchCVPipeline works with submerged pipelines.\n parts = [\n create_mock_estimator(\"f0\",[]),\n FeatureUnion([\n ('feat1', Pipeline([\n ('f11', create_mock_estimator(\"f11\", [(\"p1\",0),(\"p2\",2)])),\n ('f111', create_mock_estimator(\"f111\", [(\"p1\",0),(\"p2\",2)])),\n ('f112', create_mock_estimator(\"f112\", [(\"p1\",0),(\"p2\",2)])),\n ])),\n ('feat2', Pipeline([\n ('f12', create_mock_estimator(\"f12\", [(\"a\",0)])),\n ('f121', create_mock_estimator(\"f121\", [(\"a\",0)])),\n ('f122', create_mock_estimator(\"f122\", [(\"a\",0)])),\n ])),\n ]),\n PCA(),\n Normalizer(),\n SVC(),\n ]\n\n cv_params = [\n ('FeatureUnion__feat1__f11__p1', [10,20]),\n #('FeatureUnion__feat1__f111__p1', [10,20]),\n ('FeatureUnion__feat1__f112__p1', [10,20]),\n #('FeatureUnion__feat2__f12__a', [10,20,30]),\n #('FeatureUnion__feat2__f121__a', [10,20,30]),\n ('FeatureUnion__feat2__f122__a', [10,20,30]),\n ('PCA__n_components', [3,5,7]),\n ('Normalizer__norm', ['l2']),\n ('SVC__C', [1.,10.,100.,1000.]),\n ('SVC__kernel', ['linear']),\n ]\n\n # Set assert_n_calls_equal to False, as we need to implement our custom counting of function calls in order to measure the call tests.\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')\n\ndef test_pipeline_grid_search12():\n # Test that _DFSGridSearchCVPipeline gives the same selected parameters as the normal GridSearchCV\n parts = [\n PCA(),\n Normalizer(),\n SVC()\n ]\n\n cv_params = [\n ('PCA__n_components', [3,5,7]),\n ('Normalizer__norm', ['l1','l2']),\n ('SVC__C', [1.,10.,100.,1000.]),\n ('SVC__kernel', ['linear']),\n ]\n\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')\n\ndef test_pipeline_grid_search13():\n # Test that _DFSGridSearchCVPipeline gives the same selected parameters as the normal GridSearchCV\n parts = [\n SVC()\n ]\n\n cv_params = [\n ('SVC__C', [1.,10.,100.,1000.]),\n ('SVC__kernel', ['linear']),\n ]\n\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')\n\ndef test_pipeline_grid_search14():\n # Test that _DFSGridSearchCVPipeline gives the same selected parameters as the normal GridSearchCV\n parts = [\n PCA(),\n Normalizer(),\n SVC()\n ]\n\n cv_params = [\n 
('PCA__n_components', [3,5]),\n ('Normalizer__norm', ['l2']),\n ('SVC__C', [1.,10.]),\n ('SVC__kernel', ['linear']),\n ]\n\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')\n\ndef test_pipeline_grid_search15():\n # Test if _CacheGridSearchCVPipeline works with submerged pipelines.\n parts = [\n create_mock_estimator(\"f0\",[(\"p1\",0)]),\n FeatureUnion([\n ('feat1', Pipeline([\n ('f11', create_mock_estimator(\"f11\", [(\"p1\",0)])),\n ('f12', create_mock_estimator(\"f12\", [(\"p1\",0)])),\n ])),\n ('feat2', Pipeline([\n ('f21', create_mock_estimator(\"f21\", [(\"p1\",0)])),\n ('f22', create_mock_estimator(\"f22\", [(\"p1\",0)])),\n ])),\n ]),\n PCA(),\n Normalizer(),\n SVC(),\n ]\n\n cv_params = [\n ('f0__p1', [10,20]),\n ('FeatureUnion__feat1__f11__p1', [30,40]),\n ('FeatureUnion__feat1__f12__p1', [50,60]),\n ('FeatureUnion__feat2__f21__p1', [100,200,300]),\n ('FeatureUnion__feat2__f22__p1', [400,500,600]),\n ('PCA__n_components', [3,5]),\n ('Normalizer__norm', ['l2']),\n ('SVC__C', [1.,10.]),\n ('SVC__kernel', ['linear']),\n ]\n\n # Set assert_n_calls_equal to False, as we need to implement our custom counting of function calls in order to measure the call tests.\n perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')\n\ndef perform_pipeline_case(parts, cv_params, assert_n_calls_equal=True, **pipelinegridsearchcv_kwargs):\n # tests a particular pipe and cv_params combination\n\n pipe = Pipeline([ (p.__class__.__name__, p) for p in parts ])\n print(pipe)\n\n X, y = make_classification(n_samples=100, n_features=20)\n\n n_folds = 5\n n_jobs = 1\n verbose = 1\n random_seed = 0\n\n # mock.MagicMock cannot be used since GridSearchCV resets each estimator using\n # clone() before each call to fit.\n # So, let's use global variables instead that we increment in our mock\n # estimators.\n global n_transform_calls, n_fit_calls\n\n # Start PipelineGridSearchCV test here\n n_transform_calls = 0\n n_fit_calls = 0\n ideal_cv_time = time.time()\n model = PipelineGridSearchCV(pipe, dict(cv_params), cv=StratifiedKFold(y, n_folds, random_state=random_seed), verbose=verbose, n_jobs=n_jobs, **pipelinegridsearchcv_kwargs)\n model.fit(X,y)\n ideal_cv_time = time.time() - ideal_cv_time\n print(\"model.best_estimator_: {}\".format(model.best_estimator_))\n print(\"Counts (PipelineGridSearchCV)\")\n print(\"n_fit_calls:\",n_fit_calls)\n print(\"n_transform_calls:\",n_transform_calls)\n print(\"time to do grid search:\",ideal_cv_time)\n\n n_ideal_fit_calls = calc_n_ideal_fit_calls(parts,cv_params,n_folds)\n n_ideal_transform_calls = calc_n_ideal_transform_calls(parts,cv_params,n_folds)\n if assert_n_calls_equal:\n # Make sure that PipelineGridSearchCV only called fit the optimal number of times.\n assert_equal(n_fit_calls, n_ideal_fit_calls)\n assert_equal(n_transform_calls, n_ideal_transform_calls)\n\n # Start GridSearchCV test here\n n_transform_calls = 0\n n_fit_calls = 0\n naive_cv_time = time.time()\n model_naive = GridSearchCV(pipe, dict(cv_params), cv=StratifiedKFold(y, n_folds, random_state=random_seed), verbose=verbose, n_jobs=n_jobs)\n model_naive.fit(X,y)\n naive_cv_time = time.time() - naive_cv_time\n print(\"Counts (GridSearchCV)\")\n print(\"n_fit_calls:\",n_fit_calls)\n print(\"n_transform_calls:\",n_transform_calls)\n print(\"time to do grid search:\",naive_cv_time)\n\n n_param_combs = np.prod(map(lambda x: len(x[1]), cv_params))\n 
n_naive_fit_calls = n_param_combs * len(parts) * n_folds + len(parts)\n n_naive_transform_calls = n_param_combs * (len(parts)-1) * n_folds * 2 + (len(parts)-1) # The 2 is for running on both the train and dev. set\n if assert_n_calls_equal:\n assert_equal(n_fit_calls, n_naive_fit_calls)\n assert_equal(n_transform_calls, n_naive_transform_calls)\n\n # Make sure that PipelineGridSearchCV and GridSearchCV return the same result.\n print(\"[pipeline_grid_search] best_params_:\",model.best_params_)\n print(\"[pipeline_grid_search] best_score_:\",model.best_score_)\n print(\"[naive_grid_search] best_params_:\",model_naive.best_params_)\n print(\"[naive_grid_search] best_score_:\",model_naive.best_score_)\n assert_equal(model_naive.best_score_, model.best_score_)\n # Note that for equal mean_validation_score, the best params of GridSearchCV will depend\n # on the order that they occur to the classifier, so sometimes this test fails even though\n # PipelineGridSearchCV behaves correctly.\n assert_equal(model_naive.best_params_, model.best_params_)\n\n"
] |
[
[
"sklearn.datasets.make_classification",
"sklearn.cross_validation.StratifiedKFold",
"sklearn.pipeline.Pipeline",
"sklearn.svm.SVC",
"sklearn.preprocessing.Normalizer",
"sklearn.decomposition.PCA"
]
] |
Pennsieve/timeseries-processor
|
[
"85766afa76182503fd66cec8382c22e757743f01"
] |
[
"edf_processor/edf_processor/edf.py"
] |
[
"import sys\nimport struct\nimport re\nimport numpy as np\nfrom datetime import datetime\n\nsecond_to_usecond = 1000000\n\ndef twosComp(val, bits):\n \"\"\"compute the 2's complement of int value val\"\"\"\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is\n\ndef transformValue(val, phys_min, phys_max, dig_min, dig_max, bits):\n two_comped_val = twosComp(val, bits)\n bit_value = (phys_max - phys_min) /(dig_max - dig_min)\n offset = (phys_max / bit_value) - dig_max\n return bit_value * (offset + float(two_comped_val))\n\nclass EdfReader():\n \"\"\"\n Simple interface for EDF, EDF+C and EDF+D files\n \"\"\"\n \n def __init__ (self,filename):\n self.open(filename)\n \n def __enter__(self):\n return self\n\n def __exit__(self,exception_type, exception_value, traceback):\n return\n\n def open(self,filename):\n with open(filename, \"rb\") as f:\n self.version = f.read(8).strip()\n self.patient_id = f.read(80).strip()\n self.record_id = f.read(80).strip()\n self.start_date = f.read(8).strip()\n self.start_time = f.read(8).strip()\n self.nb_bytes = int(f.read(8))\n self.reserved = f.read(44).strip()\n self.nb_data_rec = int(f.read(8))\n self.duration = float(f.read(8))\n self.nb_signal = int(f.read(4))\n self.labels = [ y.strip() for y in re.findall('.{1,16}', f.read(16 * self.nb_signal))]\n self.transducer_type = [ y.strip() for y in re.findall('.{1,80}', f.read(80 * self.nb_signal))]\n self.phy_dim = [y.strip() for y in re.findall('.{1,8}', f.read(8 * self.nb_signal))]\n self.phy_min = [ float(y) for y in re.findall('.{1,8}', f.read(8 * self.nb_signal))]\n self.phy_max = [ float(y) for y in re.findall('.{1,8}', f.read(8 * self.nb_signal))]\n self.dig_min = [ float(y) for y in re.findall('.{1,8}', f.read(8 * self.nb_signal))]\n self.dig_max = [ float(y) for y in re.findall('.{1,8}', f.read(8 * self.nb_signal))]\n self.prefiltering = [ y.strip() for y in re.findall('.{1,80}', f.read(80 * self.nb_signal))]\n self.nr_samples = [int(y) for y in re.findall('.{1,8}', f.read(8 * self.nb_signal))]\n self.reserved_signal = [y.strip() for y in re.findall('.{1,32}', f.read(32 * self.nb_signal))]\n self.data_signal=[]\n self.rec_start_time=[]\n self.annotations=[]\n for x in range(self.nb_signal):\n if (self.labels[x]!=\"EDF Annotations\"):\n self.data_signal.append(np.array([])) \n for y in range(self.nb_data_rec):\n self.rec_start_time.append([])\n for x in range(self.nb_signal):\n if (self.labels[x]==\"EDF Annotations\"):\n segment=f.read (2 * self.nr_samples[x])\n self.annotations.append(segment)\n if (self.reserved==\"EDF+D\"):\n split_segment=segment.split(\"\\x14\\x14\")\n self.rec_start_time[y]=float(split_segment[0])\n else:\n block=f.read ( 2 * self.nr_samples[x])\n segment= [block[i:i+2] for i in range(0, len(block), 2)]\n self.data_signal[x] = np.append(self.data_signal[x], [transformValue(struct.unpack('<H',z)[0], self.phy_min[x], self.phy_max[x], self.dig_min[x], self.dig_max[x], 16) for z in segment])\n \n def getNSamples(self):\n return [x* self.nb_data_rec for x in self.nr_samples] \n\n def getSignalLabels(self):\n return self.labels\n \n def getRecordStartTime(self,i):\n return self.rec_start_time[i]\n \n def getNumberOfDataRecords(self):\n return self.nb_data_rec\n\n def getNrSamples(self,i):\n return self.nr_samples[i]\n\n def getSampleFrequency(self, i):\n return float(self.nr_samples[i]) / self.duration\n\n def getPhysicalDimension(self,i):\n return 
self.phy_dim[i]\n\n    def getStartdatetime(self):\n        split_start_date=self.start_date.split(\".\")\n        split_start_time=self.start_time.split(\".\")\n        year=int(split_start_date[2])+1900\n        if (year<1985):\n            year=year+100\n        month=int(split_start_date[1])\n        day=int(split_start_date[0])\n        hour=int(split_start_time[0])\n        minute=int(split_start_time[1])\n        second=int(split_start_time[2])\n        return datetime(year, month, day, hour, minute, second)\n\n    def getTimestamps(self, index, signal_number, start_time) :\n        #start_time is in usecs since epoch\n        start_time_record = start_time + self.getRecordStartTime(index) * second_to_usecond \n        end_time_record = start_time_record + second_to_usecond * self.getDuration() \n        return np.linspace(start_time_record, end_time_record, num=self.getNrSamples(signal_number), endpoint=False) \n    \n    def isDiscontiguous(self):\n        return (self.reserved == \"EDF+D\")\n\n    def getDuration(self):\n        return self.duration\n    \n    def getAnnotations(self):\n        return self.annotations\n\n    def readSignal(self, i, start,end):\n        return self.data_signal[i][start:end]\n\n\n"
] |
[
[
"numpy.array"
]
] |
DocTorH123/PM_Checker
|
[
"84e8ba09104d005b67549012b4f4e95de20f94f2"
] |
[
"PM/BPM.py"
] |
[
"import librosa;import sys;import matplotlib.pyplot as plt\n\ntry:\n filename = input(\"Input your File Name\")\n y, sr = librosa.load(\"Test_Music/\"+filename)\n tempo, beat_frames = librosa.beat.beat_track(y=y,sr=sr)\n print('{:.2f}BPM'.format(tempo))\n beat_times = librosa.frames_to_time(beat_frames, sr=sr);\n print(beat_times);plt.plot(beat_times);\n plt.axhline(y=tempo, color='r', linewidth=1);plt.show()\n\nexcept FileNotFoundError:\n print(\"File is not Founded, please type correctly\")\n sys.exit(0)\n \n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show"
]
] |
lovishjindal2503/TOPSIS_project1
|
[
"4ce4e15e04ac84b716a6ae6f4a50ac01fdb854d8"
] |
[
"Topsis_lovish_101703312/topsis.py"
] |
[
"import numpy as np\n\ndef Topsis(weights, numerical_data, impact):\n\n\ttry:\n\t\tif(numerical_data.shape[1] != weights.shape[0] or weights.shape != impact.shape or numerical_data.shape[1] != impact.shape[0]):\n\t\t\traise Exception(\"Given input is not correct\")\n\texcept Exception as e:\n\t\tprint(\"Given input is incorrect\")\n\t\treturn\n\n\t#Converting weight matrix into percent form\n\tweights = weights/weights.sum()\n\t#Making normalized matrix\n\tfor i in range(numerical_data.shape[1]):\n\t\tnumerical_data[:,i] = (numerical_data[:,i]/np.sqrt((numerical_data[:,i]**2).sum()))\n\n\t#Multiplying columns with their specific weights\n\tnumerical_data = numerical_data*(weights.reshape(1,numerical_data.shape[1]))\n\n\tideal_best_values = []\n\tideal_worst_values = []\n\n\tfor i in range(numerical_data.shape[1]):\n\t\tif(impact[i] == \"+\"):\n\t\t\t#It indicates this particular feature value need to be increased\n\t\t\tideal_best_values.append(numerical_data[:,i].max())\n\t\t\tideal_worst_values.append(numerical_data[:,i].min())\n\t\telif(impact[i] == \"-\"):\n\t\t\t#This feature value need to be decreased\n\t\t\tideal_best_values.append(numerical_data[:,i].min())\n\t\t\tideal_worst_values.append(numerical_data[:,i].max())\n\n\tideal_best_values = np.array(ideal_best_values, dtype = np.float)\n\tideal_worst_values = np.array(ideal_worst_values, dtype = np.float)\n\n\teuclDist_ideal_best = np.sqrt(((numerical_data - ideal_best_values)**2).sum(axis = 1))\n\teuclDist_ideal_worst = np.sqrt(((numerical_data - ideal_worst_values)**2).sum(axis = 1))\n\n\tperformance_score = euclDist_ideal_worst/(euclDist_ideal_best + euclDist_ideal_worst)\n\tranking = np.argsort(performance_score)\n\treturn np.argmax(performance_score)#Returning the index of the row having maximum performance score\n"
] |
[
[
"numpy.argsort",
"numpy.array",
"numpy.argmax"
]
] |
phspo/snakemake-ngs-spa-typing
|
[
"de710308c1ad26303539d2998573d682caba3e25"
] |
[
"scripts/createKmerErrorDistributionPlots.py"
] |
[
"import json\nimport math\nfrom scipy.stats import poisson\nimport sys\nimport logging\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom matplotlib.ticker import MaxNLocator\n\nlogging.basicConfig(filename=snakemake.log[0], level=logging.DEBUG,format=\"%(asctime)s:%(levelname)s:%(message)s\")\n\n#Read counts\nexpected_counts = json.load(open(snakemake.input['expectedCounts'],'r'))\nactual_counts = json.load(open(snakemake.input['observedCounts'],'r'))\n\npredictedType = '???'\nwith open(snakemake.input['probabilities'],'r') as predictionFile:\n\tpredictedType = predictionFile.read().splitlines()[0].split('\\t')[0]\n\nkmer_error = -1\nwith open(snakemake.input['kmerError'],'r') as errorFile:\n\tkmer_error = float(errorFile.read().splitlines()[0].split('\\t')[1])\n\ngroundTruthType = snakemake.params['gt']\n\ngtt_counts = expected_counts[groundTruthType]\nprd_counts = expected_counts[predictedType]\n\ngtt_correct = [actual_counts[x] for x in actual_counts if x in gtt_counts ]\ngtt_error = [actual_counts[x] for x in actual_counts if not x in gtt_counts ]\n\nprd_correct = [actual_counts[x] for x in actual_counts if x in prd_counts]\nprd_error = [actual_counts[x] for x in actual_counts if not x in prd_counts ]\n\nfig, axs = plt.subplots(2, 2, figsize=(8, 8),sharex='col')\n\nlabels, counts = np.unique(gtt_correct, return_counts=True)\naxs[0,0].bar(labels, counts, align='center')\naxs[0,0].set_title(\"GTT Correct Kmers\")\n\nlabels, counts = np.unique(gtt_error, return_counts=True)\naxs[0,1].bar(labels, counts, align='center')\naxs[0,1].set_title(\"GTT Error Kmers\")\n\nlabels, counts = np.unique(prd_correct, return_counts=True)\naxs[1,0].bar(labels, counts, align='center')\naxs[1,0].set_title(\"PT Correct Kmers\")\n\nlabels, counts = np.unique(prd_error, return_counts=True)\naxs[1,1].bar(labels, counts, align='center')\naxs[1,1].set_title(\"PT Error Kmers\")\n\nplt.savefig(snakemake.output[\"errors\"])\n\nplt.clf()\n\n#Deviation histogram\n\ndeviationsGttError = []\ndeviationsGttActual = []\ndeviationsGttSum = []\ndeviationsPrdError = []\ndeviationsPrdActual = []\ndeviationsPrdSum = []\n\nepsilonGtt = sum(actual_counts[x] for x in actual_counts)*kmer_error/sum(1 for kmer in actual_counts if not kmer in gtt_counts)\nepsilonPrd = sum(actual_counts[x] for x in actual_counts)*kmer_error/sum(1 for kmer in actual_counts if not kmer in prd_counts)\n\nfor kmer in actual_counts:\n\tif kmer in gtt_counts:\n\t\tdeviationsGttActual.append(actual_counts[kmer]-gtt_counts[kmer])\n\telse:\n\t\tdeviationsGttError.append(actual_counts[kmer]-epsilonGtt)\n\tdeviationsGttSum.append(actual_counts[kmer]-epsilonGtt if kmer not in gtt_counts else actual_counts[kmer]-gtt_counts[kmer])\n\tif kmer in prd_counts:\n\t\tdeviationsPrdActual.append(actual_counts[kmer]-prd_counts[kmer])\n\telse:\n\t\tdeviationsPrdError.append(actual_counts[kmer]-epsilonPrd)\n\tdeviationsPrdSum.append(actual_counts[kmer]-epsilonPrd if kmer not in prd_counts else actual_counts[kmer]-prd_counts[kmer])\n\n\nfig, axs = plt.subplots(2, 3, figsize=(12, 8),sharex='col')\n\nlabels, counts = np.unique(deviationsGttError, return_counts=True)\naxs[0,0].bar(labels, counts, align='center')\naxs[0,0].set_title(\"Errors GTT, Epsilon GTT={}\".format(epsilonGtt))\nlabels, counts = np.unique(deviationsGttActual, return_counts=True)\naxs[0,1].bar(labels, counts, align='center')\naxs[0,1].set_title(\"Actual GTT\")\nlabels, counts = np.unique(deviationsGttSum, return_counts=True)\naxs[0,2].bar(labels, counts, 
align='center')\naxs[0,2].set_title(\"Sum GTT\")\n\nlabels, counts = np.unique(deviationsPrdError, return_counts=True)\naxs[1,0].bar(labels, counts, align='center')\naxs[1,0].set_title(\"Errors PRD, Epsilon PRD={}\".format(epsilonPrd))\nlabels, counts = np.unique(deviationsPrdActual, return_counts=True)\naxs[1,1].bar(labels, counts, align='center')\naxs[1,1].set_title(\"Actual PRD\")\nlabels, counts = np.unique(deviationsPrdSum, return_counts=True)\naxs[1,2].bar(labels, counts, align='center')\naxs[1,2].set_title(\"Sum PRD\")\n\nplt.savefig(snakemake.output['deviations'])\n"
] |
[
[
"matplotlib.pyplot.clf",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.unique"
]
] |
thayes805/earthpy
|
[
"d34dfe9eb9e8b6ca0ded2541d3ddbac9cb15474b"
] |
[
"examples/plot_raster_stack_crop.py"
] |
[
"\"\"\"\nStack and Crop Raster Data Using EarthPy\n========================================\n\nLearn how to stack and crop satellite imagery using EarthPy\n\"\"\"\n\n###############################################################################\n# Stack and Crop Raster Data Using EarthPy\n# ---------------------------------------------\n#\n# .. note::\n# The examples below will show you how to use the ``es.stack()`` and\n# ``es.crop_image()`` functions from EarthPy.\n\n###############################################################################\n# Stack Multi Band Imagery\n# -----------------------------\n# Some remote sensing datasets are stored with each band in a separate file. However,\n# often you want to use all of the bands together in your analysis. For example\n# you need all of the bands together in the same file or \"stack\" in order to plot a color\n# RGB image. EarthPy has a ``stack()`` function that allows you\n# to take a set of ``.tif`` files that are all in the same spatial extent, CRS and resolution\n# and either export them together a single stacked ``.tif`` file or work with them in Python\n# directly as a stacked numpy array.\n#\n# To begin using the EarthPy ``stack()`` function, import the needed packages\n# and create an array to be plotted. Below you plot the data as continuous with a colorbar\n# using the ``plot_bands()`` function.\n\n###############################################################################\n# Import Packages\n# ------------------------------\n#\n# You will need several packages to stack your raster. You will use GeoPandas to\n# open up a shapefile that will be used to crop your data. You will primarily be\n# using the EarthPy spatial module in this vignette.\n\nimport os\nfrom glob import glob\nimport matplotlib.pyplot as plt\nimport rasterio as rio\nfrom rasterio.plot import plotting_extent\nimport geopandas as gpd\nimport earthpy as et\nimport earthpy.spatial as es\nimport earthpy.plot as ep\n\n\n########################################################################################\n# Get Example Data Ready for Stack\n# ----------------------------------\n# With EarthPy you can create a stack from all of the Landsat .tif files (one per band)\n# in a folder with the ``es.stack()`` function.\n\n###################################################################################\n# Error found on Windows systems\n# -------------------------------\n# .. note::\n# If you are running this script on a Windows system, there is a\n# known bug with ``.to_crs()``, which is used in this script. 
If an error\n#       occurs, you have to reset your os environment with the command\n#       ``os.environ[\"PROJ_LIB\"] = r\"path-to-share-folder-in-environment\"``.\n\n# Get sample data from EarthPy and set your home working directory\n\ndata_path = et.data.get_data(\"vignette-landsat\")\nos.chdir(os.path.join(et.io.HOME, \"earth-analytics\"))\n\n# Prepare the landsat bands to be stacked using glob and sort\n\nlandsat_bands_data_path = \"data/vignette-landsat/LC08_L1TP_034032_20160621_20170221_01_T1_sr_band*[2-4]*_crop.tif\"\nstack_band_paths = glob(landsat_bands_data_path)\nstack_band_paths.sort()\n\n# Create output directory and the output path\n\noutput_dir = os.path.join(\"data\", \"outputs\")\nif not os.path.isdir(output_dir):\n    os.mkdir(output_dir)\n\nraster_out_path = os.path.join(output_dir, \"raster.tiff\")\n\n####################################################################################\n# Stack the Bands\n# ---------------------------\n# The stack function has an optional output argument, where you can write the raster\n# to a tiff file in a folder. If you want to use this functionality, make sure there\n# is a folder to write your tiff file to.\n# The stack function also returns two objects, an array and a RasterIO profile. Make\n# sure to catch both in variables.\n\n# Stack Landsat bands\n\nos.chdir(os.path.join(et.io.HOME, \"earth-analytics\"))\narray, raster_prof = es.stack(stack_band_paths, out_path=raster_out_path)\n\n####################################################################################\n# Create Extent Object\n# --------------------------------\n# To get the raster extent, use the ``plotting_extent`` function on the\n# array from ``es.stack()`` and the Rasterio profile or metadata object. The function\n# needs a single\n# layer of a numpy array, which is why we use ``array[0]``. The function also\n# needs the spatial transformation for the Rasterio object, which can be acquired by accessing\n# the ``\"transform\"`` key within the Rasterio Profile.\n\nextent = plotting_extent(array[0], raster_prof[\"transform\"])\n\n################################################################################\n# Plot Un-cropped Data\n# ------------------------------\n# You can see the boundary and the raster before the crop using ``ep.plot_rgb()``.\n# Notice that the data appear washed out.\n\nfig, ax = plt.subplots(figsize=(12, 12))\nep.plot_rgb(\n    array,\n    ax=ax,\n    stretch=True,\n    extent=extent,\n    str_clip=0.5,\n    title=\"RGB Image of Un-cropped Raster\",\n)\nplt.show()\n\n\n################################################################################\n# Explore the Range of Values in the Data\n# ---------------------------------------\n# You can explore the range of values found in the data using the EarthPy ``hist()``\n# function. Do you notice any extreme values that may be impacting the stretch\n# of the image?\n\nep.hist(array, title=[\"Band 1\", \"Band 2\", \"Band 3\"])\nplt.show()\n\n###########################################################################\n# No Data Option\n# ---------------\n# ``es.stack()`` can handle ``nodata`` values in a raster. To use this\n# parameter, specify ``nodata=``. This will mask every pixel that contains\n# the specified ``nodata`` value. 
The output will be a numpy masked array.\n\nos.chdir(os.path.join(et.io.HOME, \"earth-analytics\"))\narray_nodata, raster_prof_nodata = es.stack(stack_band_paths, nodata=-9999)\n\n# View hist of data with nodata values removed\nep.hist(\n array_nodata,\n title=[\n \"Band 1 - No Data Values Removed\",\n \"Band 2 - No Data Values Removed\",\n \"Band 3 - No Data Values Removed\",\n ],\n)\nplt.show()\n\n# Recreate extent object for the No Data array\n\nextent_nodata = plotting_extent(\n array_nodata[0], raster_prof_nodata[\"transform\"]\n)\n\n################################################################################\n# Plot Un-cropped Data\n# ------------------------------\n# Plot the data again after the nodata values are removed.\n\nfig, ax = plt.subplots(figsize=(12, 12))\nep.plot_rgb(\n array_nodata,\n ax=ax,\n stretch=True,\n extent=extent,\n str_clip=0.5,\n title=\"RGB image of Un-cropped Raster, No Data Value Selected\",\n)\nplt.show()\n\n#############################################################################\n# Crop the Data\n# ------------------\n# Sometimes you have data for an area that is larger than your study area.\n# It is more efficient to first crop the data to your study area before processing\n# it in Python. The fastest and most efficient option is to crop each file\n# individually, write out the cropped raster to a new file, and then stack\n# the new files together. To do this, make sure you have a ShapeFile boundary\n# in the form of a GeoPandas object you can use as the cropping object.\n# Then, loop through every file you wish to crop and crop the image, then\n# write it out to a file. Take the rasters created and stack them like\n# you stacked bands in the previous examples.\n\nos.chdir(os.path.join(et.io.HOME, \"earth-analytics\"))\n\n# Open the crop boundary using GeoPandas.\n\ncrop_bound = gpd.read_file(\n \"data/vignette-landsat/vector_layers/fire-boundary-geomac/co_cold_springs_20160711_2200_dd83.shp\"\n)\n\n#############################################################################\n# Reproject the data\n# ------------------\n# .. note::\n# If you are on windows, make sure to set your environment here!\n#\n# The crop function won't work properly if the data are in different Coordinate\n# Reference Systems (CRS). To fix this, be sure to reproject the crop layer to match\n# the CRS of your raster data.\n# To reproject your data, first get the CRS of the raster from the rasterio profile\n# object. 
Then use that to reproject using geopandas ``.to_crs`` method.\n\nos.chdir(os.path.join(et.io.HOME, \"earth-analytics\"))\n\nwith rio.open(stack_band_paths[0]) as raster_crs:\n crop_raster_profile = raster_crs.profile\n crop_bound_utm13N = crop_bound.to_crs(crop_raster_profile[\"crs\"])\n\n#############################################################################\n# Crop Each Band\n# --------------\n# When you need to crop and stack a set of images, it is most efficient to first\n# crop each image, and then stack it.\n# ``es.crop_all()`` is an efficient way to crop all bands in an image quickly.\n# The function will write out cropped rasters to a\n# directory and return a list of file paths that can then be used with\n# ``es.stack()``.\n\nos.chdir(os.path.join(et.io.HOME, \"earth-analytics\"))\n\nband_paths_list = es.crop_all(\n stack_band_paths, output_dir, crop_bound_utm13N, overwrite=True\n)\n\n#############################################################################\n# Stack All Bands\n# ---------------\n# Once the data are cropped, you are ready to create a new stack.\n\nos.chdir(os.path.join(et.io.HOME, \"earth-analytics\"))\n\ncropped_array, array_raster_profile = es.stack(band_paths_list, nodata=-9999)\ncrop_extent = plotting_extent(\n cropped_array[0], array_raster_profile[\"transform\"]\n)\n\n# Plotting the cropped image\n# sphinx_gallery_thumbnail_number = 5\nfig, ax = plt.subplots(figsize=(12, 6))\ncrop_bound_utm13N.boundary.plot(ax=ax, color=\"red\", zorder=10)\nep.plot_rgb(\n cropped_array,\n ax=ax,\n stretch=True,\n extent=crop_extent,\n title=\"Cropped Raster and Fire Boundary\",\n)\nplt.show()\n\n#############################################################################\n# Crop Individual Bands\n# ---------------------\n# If you only need to crop one raster image, you can use EarthPy's \n# ``es.crop_image()`` function.\n# This function takes a Rasterio object and crops it to the provided \n# spatial extent.\n\n# Open Landsat image as a Rasterio object in order to crop it\nos.chdir(os.path.join(et.io.HOME, \"earth-analytics\"))\n\nwith rio.open(stack_band_paths[0]) as src:\n single_cropped_image, single_cropped_meta = es.crop_image(\n src, crop_bound_utm13N\n )\n\n# Create the extent object\nsingle_crop_extent = plotting_extent(\n single_cropped_image[0], single_cropped_meta[\"transform\"]\n)\n\n# Plot the newly cropped image\nfig, ax = plt.subplots(figsize=(12, 6))\ncrop_bound_utm13N.boundary.plot(ax=ax, color=\"red\", zorder=10)\nep.plot_bands(\n single_cropped_image,\n ax=ax,\n extent=single_crop_extent,\n title=\"Single Cropped Raster and Fire Boundary\",\n)\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
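The EarthPy record above centers on es.stack() and plotting_extent(). Below is a minimal sketch of that stacking step, assuming the same "vignette-landsat" sample data the vignette downloads; the glob pattern is the vignette's own and is not re-verified here.

# Minimal sketch of the stacking step, assuming the "vignette-landsat"
# sample data that et.data.get_data() downloads.
import os
from glob import glob

import earthpy as et
import earthpy.spatial as es
from rasterio.plot import plotting_extent

et.data.get_data("vignette-landsat")
os.chdir(os.path.join(et.io.HOME, "earth-analytics"))

band_paths = sorted(glob(
    "data/vignette-landsat/"
    "LC08_L1TP_034032_20160621_20170221_01_T1_sr_band*[2-4]*_crop.tif"
))

# es.stack() returns two objects: the stacked array and a rasterio profile;
# nodata=-9999 masks fill pixels so they do not skew an image stretch.
arr, prof = es.stack(band_paths, nodata=-9999)

# plotting_extent() wants a single 2D band and the affine transform.
extent = plotting_extent(arr[0], prof["transform"])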
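A companion sketch for the crop-then-restack step described in the same record: the boundary is reprojected to the raster CRS before cropping, since the crop functions require matching CRSs. It assumes band_paths from the sketch above and the boundary shapefile shipped with the sample data.

import os

import geopandas as gpd
import rasterio as rio
import earthpy.spatial as es

crop_bound = gpd.read_file(
    "data/vignette-landsat/vector_layers/fire-boundary-geomac/"
    "co_cold_springs_20160711_2200_dd83.shp"
)
# Reproject the vector boundary to match the raster CRS.
with rio.open(band_paths[0]) as src:
    crop_bound = crop_bound.to_crs(src.profile["crs"])

out_dir = os.path.join("data", "outputs")
os.makedirs(out_dir, exist_ok=True)

# crop_all() writes one cropped .tif per band and returns their paths,
# which can be fed straight back into es.stack().
cropped_paths = es.crop_all(band_paths, out_dir, crop_bound, overwrite=True)
cropped_arr, cropped_prof = es.stack(cropped_paths, nodata=-9999)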
CSchoel/hh-modelica
|
[
"e84713a4a15bfcc5736755940e6af38bb6272521"
] |
[
"scripts/plot_compare.py"
] |
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\n\ndef compare(afile, bfile):\n adata, bdata = [pd.read_csv(x, delimiter=\",\") for x in [afile, bfile]]\n f = plt.figure(figsize=(6, 2), dpi=300)\n ax = f.add_subplot()\n ax.plot(adata[\"time\"] * 1000, adata[\"v_m\"], label=\"modular\")\n ax.plot(bdata[\"time\"], bdata[\"v_m\"], \"--\", label=\"monolithic\")\n ax.set_xlabel(\"time [ms]\")\n ax.set_ylabel(\"membrane\\npotential [mV]\")\n ax.legend(loc=\"best\")\n ax.set_xlim(0, max(bdata[\"time\"]))\n f.tight_layout()\n if not os.path.exists(\"plots\"):\n os.mkdir(\"plots\")\n f.savefig(\"plots/modular_vs_monolithic.pdf\")\n f.savefig(\"plots/modular_vs_monolithic.eps\")\n f.savefig(\"plots/modular_vs_monolithic.jpg\")\n\n\nif __name__ == \"__main__\":\n compare(\n \"out/HHmodelica.CompleteModels.HHmodular_res.csv\",\n \"out/HHmodelica.CompleteModels.HHmono_res.csv\"\n )\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.figure"
]
] |
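A condensed restatement of the overlay pattern used in plot_compare.py above; the point of interest is the * 1000 that converts the modular model's time column from seconds to the milliseconds the monolithic results already use. File names here are placeholders; both CSVs are assumed to carry "time" and "v_m" columns.

import pandas as pd
import matplotlib.pyplot as plt

a = pd.read_csv("modular_res.csv")
b = pd.read_csv("monolithic_res.csv")

fig = plt.figure(figsize=(6, 2), dpi=150)
ax = fig.add_subplot()
ax.plot(a["time"] * 1000, a["v_m"], label="modular")    # s -> ms
ax.plot(b["time"], b["v_m"], "--", label="monolithic")  # already in ms
ax.set_xlabel("time [ms]")
ax.set_ylabel("membrane potential [mV]")
ax.legend(loc="best")
fig.tight_layout()
fig.savefig("modular_vs_monolithic.pdf")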
Yannic/chromium
|
[
"ab32e8aacb08c9fce0dc4bf09eec456ba46e3710"
] |
[
"tools/android/customtabs_benchmark/scripts/customtabs_benchmark.py"
] |
[
"#!/usr/bin/env python3\n#\n# Copyright 2015 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Loops Custom Tabs tests and outputs the results into a CSV file.\"\"\"\n\nimport collections\nimport contextlib\nimport logging\nimport optparse\nimport os\nimport random\nimport re\nimport sys\nimport time\n\n_SRC_PATH = os.path.abspath(os.path.join(\n os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))\n\nsys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))\nfrom devil.android import device_errors\nfrom devil.android import device_utils\nfrom devil.android import flag_changer\nfrom devil.android.perf import cache_control\nfrom devil.android.sdk import intent\n\nsys.path.append(os.path.join(_SRC_PATH, 'build', 'android'))\nimport devil_chromium\n\nimport chrome_setup\n\n\n# Local build of Chrome (not Chromium).\n_CHROME_PACKAGE = 'com.google.android.apps.chrome'\n_COMMAND_LINE_FILE = 'chrome-command-line'\n_TEST_APP_PACKAGE_NAME = 'org.chromium.customtabs.test'\n_INVALID_VALUE = -1\n\n\ndef RunOnce(device, url, speculated_url, parallel_url, warmup,\n skip_launcher_activity, speculation_mode, delay_to_may_launch_url,\n delay_to_launch_url, cold, pinning_benchmark, pin_filename,\n pin_offset, pin_length, extra_brief_memory_mb, chrome_args,\n reset_chrome_state):\n \"\"\"Runs a test on a device once.\n\n Args:\n device: (DeviceUtils) device to run the tests on.\n url: (str) URL to load. End of the redirect chain when using a\n parallel request.\n speculated_url: (str) Speculated URL.\n parallel_url: ([str]) URL to load in parallel, typically\n the start of the redirect chain.\n warmup: (bool) Whether to call warmup.\n skip_launcher_activity: (bool) Whether to skip the launcher activity.\n speculation_mode: (str) Speculation Mode.\n delay_to_may_launch_url: (int) Delay to mayLaunchUrl() in ms.\n delay_to_launch_url: (int) Delay to launchUrl() in ms.\n cold: (bool) Whether the page cache should be dropped.\n pinning_benchmark: (bool) Whether to perform the 'pinning benchmark'.\n pin_filename: (str) The file to pin on the device.\n pin_offset: (int) Start offset of the range to pin.\n pin_length: (int) Number of bytes to pin.\n extra_brief_memory_mb: (int) Number of MiB to consume before starting\n Chrome. 
Applies only to the 'pinning benchmark' scenario.\n chrome_args: ([str]) List of arguments to pass to Chrome.\n reset_chrome_state: (bool) Whether to reset the Chrome local state before\n the run.\n\n Returns:\n The output line (str), like this (one line only):\n <warmup>,<prerender_mode>,<delay_to_may_launch_url>,<delay_to_launch>,\n <intent_sent_ms>,<page_load_started_ms>,<page_load_finished_ms>,\n <first_contentful_paint>\n or None on error.\n \"\"\"\n if not device.HasRoot():\n device.EnableRoot()\n\n timeout_s = 64\n logcat_timeout = int(timeout_s + delay_to_may_launch_url / 1000.\n + delay_to_launch_url / 1000.);\n\n with flag_changer.CustomCommandLineFlags(\n device, _COMMAND_LINE_FILE, chrome_args):\n launch_intent = intent.Intent(\n action='android.intent.action.MAIN',\n package=_TEST_APP_PACKAGE_NAME,\n activity='org.chromium.customtabs.test.MainActivity',\n extras={'url': str(url),\n 'speculated_url': str(speculated_url),\n 'parallel_url': str (parallel_url),\n 'warmup': warmup,\n 'skip_launcher_activity': skip_launcher_activity,\n 'speculation_mode': str(speculation_mode),\n 'delay_to_may_launch_url': delay_to_may_launch_url,\n 'delay_to_launch_url': delay_to_launch_url,\n 'pinning_benchmark': pinning_benchmark,\n 'pin_filename': str(pin_filename),\n 'pin_offset': pin_offset,\n 'pin_length': pin_length,\n 'extra_brief_memory_mb': extra_brief_memory_mb,\n 'timeout': timeout_s})\n result_line_re = re.compile(r'CUSTOMTABSBENCHCSV.*: (.*)')\n logcat_monitor = device.GetLogcatMonitor(clear=True)\n logcat_monitor.Start()\n device.ForceStop(_CHROME_PACKAGE)\n device.ForceStop(_TEST_APP_PACKAGE_NAME)\n\n if reset_chrome_state:\n chrome_setup.ResetChromeLocalState(device, _CHROME_PACKAGE)\n\n if cold:\n cache_control.CacheControl(device).DropRamCaches()\n\n device.StartActivity(launch_intent, blocking=True)\n\n match = None\n try:\n match = logcat_monitor.WaitFor(result_line_re, timeout=logcat_timeout)\n except device_errors.CommandTimeoutError as _:\n logging.warning('Timeout waiting for the result line')\n logcat_monitor.Stop()\n logcat_monitor.Close()\n return match.group(1) if match is not None else None\n\n\nRESULT_FIELDS = ('warmup', 'skip_launcher_activity', 'speculation_mode',\n 'delay_to_may_launch_url', 'delay_to_launch_url', 'commit',\n 'plt', 'first_contentful_paint')\nResult = collections.namedtuple('Result', RESULT_FIELDS)\n\n\ndef ParseResult(result_line):\n \"\"\"Parses a result line, and returns it.\n\n Args:\n result_line: (str) A result line, as returned by RunOnce().\n\n Returns:\n An instance of Result.\n \"\"\"\n tokens = result_line.strip().split(',')\n assert len(tokens) == 9\n intent_sent_timestamp = int(tokens[5])\n return Result(int(tokens[0]), int(tokens[1]), tokens[2], int(tokens[3]),\n int(tokens[4]),\n max(_INVALID_VALUE, int(tokens[6]) - intent_sent_timestamp),\n max(_INVALID_VALUE, int(tokens[7]) - intent_sent_timestamp),\n max(_INVALID_VALUE, int(tokens[8]) - intent_sent_timestamp))\n\n\ndef LoopOnDevice(device, configs, output_filename, once=False,\n should_stop=None):\n \"\"\"Loops the tests on a device.\n\n Args:\n device: (DeviceUtils) device to run the tests on.\n configs: ([dict])\n output_filename: (str) Output filename. 
'-' for stdout.\n once: (bool) Run only once.\n should_stop: (threading.Event or None) When the event is set, stop looping.\n \"\"\"\n to_stdout = output_filename == '-'\n out = sys.stdout if to_stdout else open(output_filename, 'a')\n try:\n while should_stop is None or not should_stop.is_set():\n config = configs[random.randint(0, len(configs) - 1)]\n chrome_args = chrome_setup.CHROME_ARGS\n if config['speculation_mode'] == 'no_state_prefetch':\n # NoStatePrefetch is enabled through an experiment.\n chrome_args.extend([\n '--force-fieldtrials=trial/group',\n '--force-fieldtrial-params=trial.group:mode/no_state_prefetch',\n '--enable-features=NoStatePrefetch<trial'])\n elif config['speculation_mode'] == 'speculative_prefetch':\n # Speculative Prefetch is enabled through an experiment.\n chrome_args.extend([\n '--force-fieldtrials=trial/group',\n '--force-fieldtrial-params=trial.group:mode/external-prefetching',\n '--enable-features=SpeculativeResourcePrefetching<trial'])\n\n result = RunOnce(device,\n config['url'],\n config.get('speculated_url', config['url']),\n config.get('parallel_url', ''),\n config['warmup'], config['skip_launcher_activity'],\n config['speculation_mode'],\n config['delay_to_may_launch_url'],\n config['delay_to_launch_url'], config['cold'],\n config.get('pinning_benchmark', False),\n config.get('pin_filename', ''),\n config.get('pin_offset', -1),\n config.get('pin_length', -1),\n config.get('extra_brief_memory_mb', 0),\n chrome_args, reset_chrome_state=True)\n if result is not None:\n out.write(result + '\\n')\n out.flush()\n if once:\n return\n if should_stop is not None:\n should_stop.wait(10.)\n else:\n time.sleep(10)\n finally:\n if not to_stdout:\n out.close()\n\n\ndef ProcessOutput(filename):\n \"\"\"Reads an output file, and returns a processed numpy array.\n\n Args:\n filename: (str) file to process.\n\n Returns:\n A numpy structured array.\n \"\"\"\n import numpy as np\n entries = []\n with open(filename, 'r') as f:\n lines = f.readlines()\n entries = [ParseResult(line) for line in lines]\n result = np.array(entries,\n dtype=[('warmup', np.int32),\n ('skip_launcher_activity', np.int32),\n ('speculation_mode', str),\n ('delay_to_may_launch_url', np.int32),\n ('delay_to_launch_url', np.int32),\n ('commit', np.int32), ('plt', np.int32),\n ('first_contentful_paint', np.int32)])\n return result\n\n\ndef _CreateOptionParser():\n parser = optparse.OptionParser(description='Loops Custom Tabs tests on a '\n 'device, and outputs the navigation timings '\n 'in a CSV file.')\n parser.add_option('--device', help='Device ID')\n parser.add_option('--speculated_url',\n help='URL to call mayLaunchUrl() with.',)\n parser.add_option('--url', help='URL to navigate to.',\n default='https://www.android.com')\n parser.add_option('--parallel_url', help='URL to navigate to.in parallel, '\n 'e.g. 
the start of the redirect chain.')\n parser.add_option('--warmup', help='Call warmup.', default=False,\n action='store_true')\n parser.add_option('--skip_launcher_activity',\n help='Skip ChromeLauncherActivity.', default=False,\n action='store_true')\n parser.add_option('--speculation_mode', default='prerender',\n help='The speculation mode (prerender, '\n 'speculative_prefetch or no_state_prefetch).',\n choices=['disabled', 'prerender', 'hidden_tab'])\n parser.add_option('--delay_to_may_launch_url',\n help='Delay before calling mayLaunchUrl() in ms.',\n type='int', default=1000)\n parser.add_option('--delay_to_launch_url',\n help='Delay before calling launchUrl() in ms.',\n type='int', default=-1)\n parser.add_option('--cold', help='Purge the page cache before each run.',\n default=False, action='store_true')\n parser.add_option('--output_file', help='Output file (append). \"-\" for '\n 'stdout (this is the default)', default='-')\n parser.add_option('--once', help='Run only one iteration.',\n action='store_true', default=False)\n parser.add_option('--pinning_benchmark',\n help='Compare startup with/without a preliminary step '\n 'that pins a range of bytes in the APK into memory with '\n 'mlock(2).', default=False, action='store_true')\n parser.add_option('--extra_brief_memory_mb', help='How much memory to '\n 'consume in foreground for --pinning_benchmark.',\n type='int', default=0)\n parser.add_option('--pin_filename', help='The file name on the device to pin '\n 'to memory.', default='')\n parser.add_option('--pin_offset', help='The start offset of the range to be '\n 'pinned to memory.',\n type='int', default=-1)\n parser.add_option('--pin_length', help='The length of the range being pinned,'\n ' where 0 results in no pinning.',\n type='int', default=-1)\n\n return parser\n\n\ndef main():\n parser = _CreateOptionParser()\n options, _ = parser.parse_args()\n devil_chromium.Initialize()\n devices = device_utils.DeviceUtils.HealthyDevices()\n device = devices[0]\n if len(devices) != 1 and options.device is None:\n logging.error('Several devices attached, must specify one with --device.')\n sys.exit(0)\n if options.device is not None:\n matching_devices = [d for d in devices if str(d) == options.device]\n if not matching_devices:\n logging.error('Device not found.')\n sys.exit(0)\n device = matching_devices[0]\n\n config = {\n 'url': options.url,\n 'skip_launcher_activity': options.skip_launcher_activity,\n 'speculated_url': options.speculated_url or options.url,\n 'parallel_url': options.parallel_url,\n 'warmup': options.warmup,\n 'speculation_mode': options.speculation_mode,\n 'delay_to_may_launch_url': options.delay_to_may_launch_url,\n 'delay_to_launch_url': options.delay_to_launch_url,\n 'cold': options.cold,\n 'pinning_benchmark': options.pinning_benchmark,\n 'pin_filename': options.pin_filename,\n 'pin_offset': options.pin_offset,\n 'pin_length': options.pin_length,\n 'extra_brief_memory_mb': options.extra_brief_memory_mb,\n }\n LoopOnDevice(device, [config], options.output_file, once=options.once)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array"
]
] |
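A sketch of the structured-array idea in ProcessOutput() above: parsed result tuples become a numpy array with named, typed fields mirroring RESULT_FIELDS. The rows are invented, and the fixed-width "U32" string dtype for speculation_mode is an assumption made so the text is actually stored (a bare str dtype gives numpy a zero-length unicode field).

import numpy as np

rows = [
    (1, 0, "prerender", 1000, -1, 120, 450, 300),
    (0, 1, "no_state_prefetch", 500, -1, 150, 600, 380),
]
dtype = [
    ("warmup", np.int32),
    ("skip_launcher_activity", np.int32),
    ("speculation_mode", "U32"),   # assumption: fixed-width unicode field
    ("delay_to_may_launch_url", np.int32),
    ("delay_to_launch_url", np.int32),
    ("commit", np.int32),
    ("plt", np.int32),
    ("first_contentful_paint", np.int32),
]
results = np.array(rows, dtype=dtype)
print(results["first_contentful_paint"].mean())  # aggregate by field name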
asafjo23/outliers-detection
|
[
"dd78e3161c02a2f8ae685ef31ad796787e305591"
] |
[
"src/loss.py"
] |
[
"import torch.nn.functional as F\nfrom tensorboardX import SummaryWriter\n\nfrom torch import Tensor, clone, clip, round, abs, sub, sum, tensor\nfrom torch.nn.modules.loss import _Loss\n\nfrom src.model import GaussianHistogram\nfrom src.utils import DataConverter, DataProcessor\n\n\nclass MiningOutliersLoss(_Loss):\n def __init__(self, data_converter: DataConverter, data_processor: DataProcessor):\n super(MiningOutliersLoss, self).__init__()\n self._data_converter = data_converter\n self._data_processor = data_processor\n\n self.min_rating = data_processor.min_rating\n self.max_rating = data_processor.max_rating\n\n self._gauss_histo = GaussianHistogram(\n bins=self.max_rating, min=self.min_rating, max=self.max_rating, sigma=1.5\n )\n\n def mse_loss(\n self,\n original_ratings: Tensor,\n predicted_ratings: Tensor,\n ) -> Tensor:\n return F.mse_loss(original_ratings, predicted_ratings)\n\n def histogram_loss(\n self,\n users: Tensor,\n items: Tensor,\n original_ratings: Tensor,\n predicted_ratings: Tensor,\n writer: SummaryWriter,\n epoch: int,\n ) -> Tensor:\n histogram_loss = tensor(0.0)\n for user, item, original_rating, predicted_rating in zip(\n users, items, original_ratings, predicted_ratings\n ):\n user_id = self._data_converter.get_original_user_id(encoded_id=user.item())\n\n original_histogram = clone(self._data_processor.histograms_by_users[user_id])\n pdf_original_histogram = self._gauss_histo(original_histogram)\n\n if epoch == 0:\n writer.add_histogram(\n tag=f\"{user_id}/original_histogram\",\n values=original_histogram,\n global_step=epoch,\n )\n\n original_rating_index = self.to_index(rating=original_rating)\n original_mass = self._calc_histogram_mass(pdf_original_histogram, original_rating_index)\n\n original_histogram[original_rating_index] -= 1\n predicted_round_rating = round(predicted_rating)\n\n predicted_rating_index = self.to_index(rating=predicted_round_rating)\n original_histogram[predicted_rating_index] += 1\n\n pdf_predicted_histogram = self._gauss_histo(original_histogram)\n predicted_mass = self._calc_histogram_mass(\n pdf_predicted_histogram, predicted_rating_index\n )\n\n histogram_loss += abs(sub(original_mass, predicted_mass)).squeeze()\n\n writer.add_scalars(\n f\"Loss/train/histogram_mass/{user_id}\",\n {\n \"original_mass\": original_mass.item(),\n \"predicted_mass\": predicted_mass.item(),\n },\n epoch,\n )\n\n writer.add_histogram(\n tag=f\"{user_id}/predicted_histogram\",\n values=original_histogram,\n global_step=epoch,\n )\n\n histogram_loss.requires_grad = True\n return histogram_loss\n\n def to_index(self, rating: Tensor) -> int:\n min_index = max(self.min_rating - 1, 0)\n return int(clip(rating, min=min_index, max=self.max_rating - 1).item())\n\n @staticmethod\n def _calc_histogram_mass(histogram: Tensor, end: int) -> Tensor:\n area = histogram[0:end]\n if len(area) == 0:\n return Tensor([0.0])\n\n edge_mass = 0.5 * area[len(area) - 1]\n mass = sum(area) - edge_mass\n return mass\n"
] |
[
[
"torch.Tensor",
"torch.clone",
"torch.round",
"torch.sum",
"torch.sub",
"torch.tensor",
"torch.clip",
"torch.nn.functional.mse_loss"
]
] |
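A standalone sketch of the _calc_histogram_mass() idea in the loss above: sum the PDF bins below an index, counting the edge bin at half weight. The pdf values are made up.

import torch

def histogram_mass(hist: torch.Tensor, end: int) -> torch.Tensor:
    area = hist[0:end]
    if len(area) == 0:
        return torch.tensor(0.0)
    return area.sum() - 0.5 * area[-1]  # half-weight the edge bin

pdf = torch.tensor([0.05, 0.20, 0.50, 0.20, 0.05])
print(histogram_mass(pdf, 3))  # 0.05 + 0.20 + 0.50/2 = 0.50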
rgmaidana/stateEstimation
|
[
"f8f4102a20ea6dfc9b466a291244fea4fa802dd4"
] |
[
"examples/dcmotor.py"
] |
[
"#!/usr/bin/env python\n\nimport numpy as np\nfrom stateEstimation import KF\nfrom scipy.integrate import ode\nimport sys\n\n# Parameters\nsim_time = 10 # Simulation time\ninit_states = [0, 0] # Initial states\nsensor_err = 0.2 # Introduce gaussian error in simulated sensor measurements\n\n# We define a DCMotor class for convenience, and for using its output function in the ODE solver\nclass DCMotor:\n def __init__(self, Ra=8, La=170e-3, J=10e-3, b=3e-3, If=0.5, kt=0.521, kw=0.521, T=0.001, **kwargs):\n # Constructive parameters\n self.Ra = Ra\n self.La = La\n self.J = J\n self.b = b\n self.If = If\n self.kt = kt\n self.kw = kw\n\n # Motor continuous-time state-space\n self.A = np.array([[-self.b/self.J, self.kt*self.If/self.J],\n [-self.kw*self.If/self.La, -self.Ra/self.La]])\n self.B = np.array([0, 1/self.La]).reshape((2,1))\n self.C = np.array([[1, 0]], dtype=np.float)\n self.dist = np.array([[-1/self.J, 0]]).T # Input Disturbance\n\n self.T = T\n self.x = np.zeros((self.A.shape[1],1), dtype=np.float)\n self.u = np.zeros((self.B.shape[1],1), dtype=np.float)\n \n def output(self, t, x, u=0):\n dx = self.A.dot(x.reshape(self.x.shape)) + self.B.dot(u.reshape(self.u.shape)) # + self.dist\n return dx\n\nif __name__ == '__main__':\n # Instantiate DC Motor model (sampling time of 0.05 seconds)\n motor = DCMotor(T=0.005)\n\n # Define measurement model matrix for DC motor (2 states, 2 \"sensors\")\n H = np.array([[1, 0],\n [0, 1]], dtype=np.float)\n\n # Define model uncertainty for DC motor (2 states)\n Q = np.diag([10, 10])\n\n # Define sensor covariance matrix for DC motor (1 \"sensor\")\n R = np.diag([0.01, 0.01])\n \n # Instantiate filter with DC motor model\n filt = KF(motor.A, motor.B, H, Q, R, T=motor.T)\n \n # Setup Nonstiff Ordinary Diff. Equation (ODE) solver (equivalent to matlab's ODE45)\n dt = 1e-3 # ODE derivation time\n solv = ode(motor.output).set_integrator('dopri5', method='rtol') \n\n # Run for some seconds\n x = np.zeros((filt.A.shape[0],1))\n u = 10*np.ones((filt.B.shape[1],1))\n t = [0] # Time vector\n y = np.array(init_states).reshape((len(init_states),1)) # Initial states\n while True:\n # Solve ODE (simulate based on model)\n solv.set_initial_value(y[:,-1]) # Current initial value is last state\n solv.set_f_params(u) # Apply control input into system\n while solv.successful() and solv.t < filt.T:\n solv.integrate(solv.t+dt)\n y = np.c_[y, solv.y[:]] # Store simulated output\n\n # Update states (equivalent to sensing)\n filt.z = np.copy(solv.y[:].reshape(solv.y.shape[0],1))\n filt.z += np.random.normal(scale=sensor_err, size=filt.z.shape)\n\n # Run filter\n filt.run()\n\n # Store estimated states\n x = np.c_[x, filt.x]\n\n # Append time\n t.append(t[-1]+filt.T)\n if t[-1] >= sim_time: # If end of simulation, break loop\n break\n\n # Plot results\n try:\n import matplotlib.pyplot as plt\n\n legend = []\n\n # Plot states\n plt.figure()\n t = np.array(t)\n for k in range(x.shape[0]):\n plt.plot(t, x[k,:], lw=2.0)\n plt.xlabel('Time (s)')\n plt.ylabel('x')\n for k in range(0,x.shape[0]):\n legend.append('Estimated x%d' % (k+1))\n\n # Plot outputs\n for k in range(y.shape[0]):\n plt.plot(t, y[k,:], lw=2.0)\n plt.xlabel('Time (s)')\n plt.ylabel('Angular velocity (rad/s)')\n for k in range(0,y.shape[0]):\n legend.append('Simulated x%d' % (k+1))\n\n # Show figures\n plt.legend(legend)\n plt.grid()\n plt.show()\n except ImportError:\n pass"
] |
[
[
"numpy.diag",
"matplotlib.pyplot.legend",
"scipy.integrate.ode",
"numpy.ones",
"matplotlib.pyplot.plot",
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
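A condensed sketch of the estimation loop in dcmotor.py above, reusing the same stateEstimation.KF interface the example relies on (constructor KF(A, B, H, Q, R, T=...), measurement attribute z, run(), estimate x). The toy system matrices below are invented for illustration.

import numpy as np
from stateEstimation import KF

A = np.array([[0.0, 1.0], [0.0, -0.5]])  # toy 2-state linear system
B = np.array([[0.0], [1.0]])
H = np.eye(2)                            # measure both states directly
filt = KF(A, B, H, np.diag([10, 10]), np.diag([0.01, 0.01]), T=0.005)

truth = np.array([[1.0], [0.0]])
filt.z = truth + np.random.normal(scale=0.2, size=(2, 1))  # noisy "sensor"
filt.run()
print(filt.x)  # posterior state estimate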
kritikashah20/pandas
|
[
"361166f86da1a665a1b5808e0935c28f8cf56d34"
] |
[
"pandas/core/indexes/interval.py"
] |
[
"\"\"\" define the IntervalIndex \"\"\"\nfrom operator import le, lt\nimport textwrap\nfrom typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union, cast\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\nfrom pandas._libs.interval import Interval, IntervalMixin, IntervalTree\nfrom pandas._libs.tslibs import BaseOffset, Timedelta, Timestamp, to_offset\nfrom pandas._typing import AnyArrayLike, Label\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\nfrom pandas.util._exceptions import rewrite_exception\n\nfrom pandas.core.dtypes.cast import (\n find_common_type,\n infer_dtype_from_scalar,\n maybe_downcast_to_dtype,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_categorical_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_dtype_equal,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_number,\n is_object_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.algorithms import take_1d\nfrom pandas.core.arrays.interval import IntervalArray, _interval_shared_docs\nimport pandas.core.common as com\nfrom pandas.core.indexers import is_valid_positional_slice\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n _index_shared_docs,\n default_pprint,\n ensure_index,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex, date_range\nfrom pandas.core.indexes.extension import ExtensionIndex, inherit_names\nfrom pandas.core.indexes.multi import MultiIndex\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range\nfrom pandas.core.ops import get_op_result_name\n\nif TYPE_CHECKING:\n from pandas import CategoricalIndex\n\n_VALID_CLOSED = {\"left\", \"right\", \"both\", \"neither\"}\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n\n_index_doc_kwargs.update(\n dict(\n klass=\"IntervalIndex\",\n qualname=\"IntervalIndex\",\n target_klass=\"IntervalIndex or list of Intervals\",\n name=textwrap.dedent(\n \"\"\"\\\n name : object, optional\n Name to be stored in the index.\n \"\"\"\n ),\n )\n)\n\n\ndef _get_next_label(label):\n dtype = getattr(label, \"dtype\", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = \"datetime64\"\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label + np.timedelta64(1, \"ns\")\n elif is_integer_dtype(dtype):\n return label + 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, np.infty)\n else:\n raise TypeError(f\"cannot determine next label for type {repr(type(label))}\")\n\n\ndef _get_prev_label(label):\n dtype = getattr(label, \"dtype\", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = \"datetime64\"\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label - np.timedelta64(1, \"ns\")\n elif is_integer_dtype(dtype):\n return label - 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, -np.infty)\n else:\n raise TypeError(f\"cannot determine next label for type {repr(type(label))}\")\n\n\ndef _new_IntervalIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't have\n arguments and breaks __new__.\n \"\"\"\n return cls.from_arrays(**d)\n\n\nclass SetopCheck:\n \"\"\"\n This is called to decorate the set operations of IntervalIndex\n to perform the type check in advance.\n 
\"\"\"\n\n def __init__(self, op_name):\n self.op_name = op_name\n\n def __call__(self, setop):\n def func(intvidx_self, other, sort=False):\n intvidx_self._assert_can_do_setop(other)\n other = ensure_index(other)\n\n if not isinstance(other, IntervalIndex):\n result = getattr(intvidx_self.astype(object), self.op_name)(other)\n if self.op_name in (\"difference\",):\n result = result.astype(intvidx_self.dtype)\n return result\n elif intvidx_self.closed != other.closed:\n raise ValueError(\n \"can only do set operations between two IntervalIndex \"\n \"objects that are closed on the same side\"\n )\n\n # GH 19016: ensure set op will not return a prohibited dtype\n subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]\n common_subtype = find_common_type(subtypes)\n if is_object_dtype(common_subtype):\n raise TypeError(\n f\"can only do {self.op_name} between two IntervalIndex \"\n \"objects that have compatible dtypes\"\n )\n\n return setop(intvidx_self, other, sort)\n\n return func\n\n\n@Appender(\n _interval_shared_docs[\"class\"]\n % dict(\n klass=\"IntervalIndex\",\n summary=\"Immutable index of intervals that are closed on the same side.\",\n name=_index_doc_kwargs[\"name\"],\n versionadded=\"0.20.0\",\n extra_attributes=\"is_overlapping\\nvalues\\n\",\n extra_methods=\"\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n A new ``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n closed='right',\n dtype='interval[int64]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n \"\"\"\n ),\n )\n)\n@inherit_names([\"set_closed\", \"to_tuples\"], IntervalArray, wrap=True)\n@inherit_names(\n [\"__array__\", \"overlaps\", \"contains\", \"left\", \"right\", \"length\"], IntervalArray\n)\n@inherit_names(\n [\"is_non_overlapping_monotonic\", \"mid\", \"closed\"], IntervalArray, cache=True\n)\nclass IntervalIndex(IntervalMixin, ExtensionIndex):\n _typ = \"intervalindex\"\n _comparables = [\"name\"]\n _attributes = [\"name\"]\n\n # we would like our indexing holder to defer to us\n _defer_to_indexing = True\n\n # Immutable, so we are able to cache computations like isna in '_mask'\n _mask = None\n\n _data: IntervalArray\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data,\n closed=None,\n dtype=None,\n copy: bool = False,\n name=None,\n verify_integrity: bool = True,\n ):\n\n name = maybe_extract_name(name, data, cls)\n\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(\n data,\n closed=closed,\n copy=copy,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n\n return cls._simple_new(array, name)\n\n @classmethod\n def _simple_new(cls, array: IntervalArray, name: Label = None):\n \"\"\"\n Construct from an IntervalArray\n\n Parameters\n ----------\n array : IntervalArray\n name : Label, default None\n Attached as result.name\n \"\"\"\n assert isinstance(array, IntervalArray), type(array)\n\n result = IntervalMixin.__new__(cls)\n result._data = array\n result.name = name\n result._cache = {}\n result._no_setting_name = False\n result._reset_identity()\n return result\n\n @classmethod\n @Appender(\n 
_interval_shared_docs[\"from_breaks\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_breaks(\n cls, breaks, closed: str = \"right\", name=None, copy: bool = False, dtype=None\n ):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_breaks(\n breaks, closed=closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_arrays\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_arrays(\n cls,\n left,\n right,\n closed: str = \"right\",\n name=None,\n copy: bool = False,\n dtype=None,\n ):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_arrays(\n left, right, closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_tuples\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])\n IntervalIndex([(0, 1], (1, 2]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_tuples(\n cls, data, closed: str = \"right\", name=None, copy: bool = False, dtype=None\n ):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)\n return cls._simple_new(arr, name=name)\n\n # --------------------------------------------------------------------\n\n @Appender(Index._shallow_copy.__doc__)\n def _shallow_copy(self, values=None, name: Label = lib.no_default):\n name = self.name if name is lib.no_default else name\n cache = self._cache.copy() if values is None else {}\n if values is None:\n values = self._data\n\n result = self._simple_new(values, name=name)\n result._cache = cache\n return result\n\n @cache_readonly\n def _isnan(self):\n \"\"\"\n Return a mask indicating if each value is NA.\n \"\"\"\n if self._mask is None:\n self._mask = isna(self.left)\n return self._mask\n\n @cache_readonly\n def _engine(self):\n left = self._maybe_convert_i8(self.left)\n right = self._maybe_convert_i8(self.right)\n return IntervalTree(left, right, closed=self.closed)\n\n def __contains__(self, key: Any) -> bool:\n \"\"\"\n return a boolean if this key is IN the index\n We *only* accept an Interval\n\n Parameters\n ----------\n key : Interval\n\n Returns\n -------\n bool\n \"\"\"\n hash(key)\n if not isinstance(key, Interval):\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n @cache_readonly\n def _multiindex(self) -> MultiIndex:\n return MultiIndex.from_arrays([self.left, self.right], names=[\"left\", \"right\"])\n\n @cache_readonly\n def values(self) -> IntervalArray:\n \"\"\"\n Return the IntervalIndex's data as an IntervalArray.\n \"\"\"\n return self._data\n\n @property\n def _has_complex_internals(self) -> bool:\n # used to avoid libreduction code paths, which raise or require conversion\n return True\n\n def __array_wrap__(self, result, context=None):\n # we don't want the superclass implementation\n return 
result\n\n def __reduce__(self):\n d = dict(left=self.left, right=self.right)\n d.update(self._get_attributes_dict())\n return _new_IntervalIndex, (type(self), d), None\n\n @Appender(Index.astype.__doc__)\n def astype(self, dtype, copy=True):\n with rewrite_exception(\"IntervalArray\", type(self).__name__):\n new_values = self._values.astype(dtype, copy=copy)\n if is_interval_dtype(new_values.dtype):\n return self._shallow_copy(new_values)\n return Index.astype(self, dtype, copy=copy)\n\n @property\n def inferred_type(self) -> str:\n \"\"\"Return a string of the type inferred from the values\"\"\"\n return \"interval\"\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep: bool = False) -> int:\n # we don't use an explicit engine\n # so return the bytes here\n return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)\n\n # IntervalTree doesn't have a is_monotonic_decreasing, so have to override\n # the Index implementation\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return True if the IntervalIndex is monotonic decreasing (only equal or\n decreasing values), else False\n \"\"\"\n return self[::-1].is_monotonic_increasing\n\n @cache_readonly\n def is_unique(self):\n \"\"\"\n Return True if the IntervalIndex contains unique elements, else False.\n \"\"\"\n left = self.left\n right = self.right\n\n if self.isna().sum() > 1:\n return False\n\n if left.is_unique or right.is_unique:\n return True\n\n seen_pairs = set()\n check_idx = np.where(left.duplicated(keep=False))[0]\n for idx in check_idx:\n pair = (left[idx], right[idx])\n if pair in seen_pairs:\n return False\n seen_pairs.add(pair)\n\n return True\n\n @property\n def is_overlapping(self) -> bool:\n \"\"\"\n Return True if the IntervalIndex has overlapping intervals, else False.\n\n Two intervals overlap if they share a common point, including closed\n endpoints. Intervals that only have an open endpoint in common do not\n overlap.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n bool\n Boolean indicating if the IntervalIndex has overlapping intervals.\n\n See Also\n --------\n Interval.overlaps : Check whether two Interval objects overlap.\n IntervalIndex.overlaps : Check an IntervalIndex elementwise for\n overlaps.\n\n Examples\n --------\n >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])\n >>> index\n IntervalIndex([(0, 2], (1, 3], (4, 5]],\n closed='right',\n dtype='interval[int64]')\n >>> index.is_overlapping\n True\n\n Intervals that share closed endpoints overlap:\n\n >>> index = pd.interval_range(0, 3, closed='both')\n >>> index\n IntervalIndex([[0, 1], [1, 2], [2, 3]],\n closed='both',\n dtype='interval[int64]')\n >>> index.is_overlapping\n True\n\n Intervals that only have an open endpoint in common do not overlap:\n\n >>> index = pd.interval_range(0, 3, closed='left')\n >>> index\n IntervalIndex([[0, 1), [1, 2), [2, 3)],\n closed='left',\n dtype='interval[int64]')\n >>> index.is_overlapping\n False\n \"\"\"\n # GH 23309\n return self._engine.is_overlapping\n\n def _should_fallback_to_positional(self) -> bool:\n # integer lookups in Series.__getitem__ are unambiguously\n # positional in this case\n return self.dtype.subtype.kind in [\"m\", \"M\"]\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)\n\n @Appender(Index._convert_list_indexer.__doc__)\n def _convert_list_indexer(self, keyarr):\n \"\"\"\n we are passed a list-like indexer. 
Return the\n indexer for matching intervals.\n \"\"\"\n locs = self.get_indexer_for(keyarr)\n\n # we have missing values\n if (locs == -1).any():\n raise KeyError\n\n return locs\n\n def _can_reindex(self, indexer: np.ndarray) -> None:\n \"\"\"\n Check if we are allowing reindexing with this particular indexer.\n\n Parameters\n ----------\n indexer : an integer indexer\n\n Raises\n ------\n ValueError if its a duplicate axis\n \"\"\"\n # trying to reindex on an axis with duplicates\n if self.is_overlapping and len(indexer):\n raise ValueError(\"cannot reindex from an overlapping axis\")\n\n def _needs_i8_conversion(self, key) -> bool:\n \"\"\"\n Check if a given key needs i8 conversion. Conversion is necessary for\n Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An\n Interval-like requires conversion if it's endpoints are one of the\n aforementioned types.\n\n Assumes that any list-like data has already been cast to an Index.\n\n Parameters\n ----------\n key : scalar or Index-like\n The key that should be checked for i8 conversion\n\n Returns\n -------\n bool\n \"\"\"\n if is_interval_dtype(key) or isinstance(key, Interval):\n return self._needs_i8_conversion(key.left)\n\n i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)\n return isinstance(key, i8_types)\n\n def _maybe_convert_i8(self, key):\n \"\"\"\n Maybe convert a given key to it's equivalent i8 value(s). Used as a\n preprocessing step prior to IntervalTree queries (self._engine), which\n expects numeric data.\n\n Parameters\n ----------\n key : scalar or list-like\n The key that should maybe be converted to i8.\n\n Returns\n -------\n scalar or list-like\n The original key if no conversion occurred, int if converted scalar,\n Int64Index if converted list-like.\n \"\"\"\n original = key\n if is_list_like(key):\n key = ensure_index(key)\n\n if not self._needs_i8_conversion(key):\n return original\n\n scalar = is_scalar(key)\n if is_interval_dtype(key) or isinstance(key, Interval):\n # convert left/right and reconstruct\n left = self._maybe_convert_i8(key.left)\n right = self._maybe_convert_i8(key.right)\n constructor = Interval if scalar else IntervalIndex.from_arrays\n return constructor(left, right, closed=self.closed)\n\n if scalar:\n # Timestamp/Timedelta\n key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)\n else:\n # DatetimeIndex/TimedeltaIndex\n key_dtype, key_i8 = key.dtype, Index(key.asi8)\n if key.hasnans:\n # convert NaT from it's i8 value to np.nan so it's not viewed\n # as a valid value, maybe causing errors (e.g. 
is_overlapping)\n key_i8 = key_i8.where(~key._isnan)\n\n # ensure consistency with IntervalIndex subtype\n subtype = self.dtype.subtype\n\n if not is_dtype_equal(subtype, key_dtype):\n raise ValueError(\n f\"Cannot index an IntervalIndex of subtype {subtype} with \"\n f\"values of dtype {key_dtype}\"\n )\n\n return key_i8\n\n def _check_method(self, method):\n if method is None:\n return\n\n if method in [\"bfill\", \"backfill\", \"pad\", \"ffill\", \"nearest\"]:\n raise NotImplementedError(\n f\"method {method} not yet implemented for IntervalIndex\"\n )\n\n raise ValueError(\"Invalid fill method\")\n\n def _searchsorted_monotonic(self, label, side, exclude_label=False):\n if not self.is_non_overlapping_monotonic:\n raise KeyError(\n \"can only get slices from an IntervalIndex if bounds are \"\n \"non-overlapping and all monotonic increasing or decreasing\"\n )\n\n if isinstance(label, IntervalMixin):\n raise NotImplementedError(\"Interval objects are not currently supported\")\n\n # GH 20921: \"not is_monotonic_increasing\" for the second condition\n # instead of \"is_monotonic_decreasing\" to account for single element\n # indexes being both increasing and decreasing\n if (side == \"left\" and self.left.is_monotonic_increasing) or (\n side == \"right\" and not self.left.is_monotonic_increasing\n ):\n sub_idx = self.right\n if self.open_right or exclude_label:\n label = _get_next_label(label)\n else:\n sub_idx = self.left\n if self.open_left or exclude_label:\n label = _get_prev_label(label)\n\n return sub_idx._searchsorted_monotonic(label, side)\n\n def get_loc(\n self, key, method: Optional[str] = None, tolerance=None\n ) -> Union[int, slice, np.ndarray]:\n \"\"\"\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None}, optional\n * default: matches where the label is within an interval only.\n\n Returns\n -------\n int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)\n >>> index = pd.IntervalIndex([i1, i2])\n >>> index.get_loc(1)\n 0\n\n You can also supply a point inside an interval.\n\n >>> index.get_loc(1.5)\n 1\n\n If a label is in several intervals, you get the locations of all the\n relevant intervals.\n\n >>> i3 = pd.Interval(0, 2)\n >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])\n >>> overlapping_index.get_loc(0.5)\n array([ True, False, True])\n\n Only exact matches will be returned if an interval is provided.\n\n >>> index.get_loc(pd.Interval(0, 1))\n 0\n \"\"\"\n self._check_method(method)\n\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n if isinstance(key, Interval):\n if self.closed != key.closed:\n raise KeyError(key)\n mask = (self.left == key.left) & (self.right == key.right)\n else:\n # assume scalar\n op_left = le if self.closed_left else lt\n op_right = le if self.closed_right else lt\n try:\n mask = op_left(self.left, key) & op_right(key, self.right)\n except TypeError as err:\n # scalar is not comparable to II subtype --> invalid label\n raise KeyError(key) from err\n\n matches = mask.sum()\n if matches == 0:\n raise KeyError(key)\n elif matches == 1:\n return mask.argmax()\n return lib.maybe_booleans_to_slice(mask.view(\"u1\"))\n\n @Substitution(\n **dict(\n _index_doc_kwargs,\n **{\n \"raises_section\": textwrap.dedent(\n \"\"\"\n Raises\n ------\n NotImplementedError\n If any method argument other than the default of\n None is specified as these are not yet implemented.\n \"\"\"\n )\n },\n )\n 
)\n @Appender(_index_shared_docs[\"get_indexer\"])\n def get_indexer(\n self,\n target: AnyArrayLike,\n method: Optional[str] = None,\n limit: Optional[int] = None,\n tolerance: Optional[Any] = None,\n ) -> np.ndarray:\n\n self._check_method(method)\n\n if self.is_overlapping:\n raise InvalidIndexError(\n \"cannot handle overlapping indices; \"\n \"use IntervalIndex.get_indexer_non_unique\"\n )\n\n target_as_index = ensure_index(target)\n\n if isinstance(target_as_index, IntervalIndex):\n # equal indexes -> 1:1 positional match\n if self.equals(target_as_index):\n return np.arange(len(self), dtype=\"intp\")\n\n # different closed or incompatible subtype -> no matches\n common_subtype = find_common_type(\n [self.dtype.subtype, target_as_index.dtype.subtype]\n )\n if self.closed != target_as_index.closed or is_object_dtype(common_subtype):\n return np.repeat(np.intp(-1), len(target_as_index))\n\n # non-overlapping -> at most one match per interval in target_as_index\n # want exact matches -> need both left/right to match, so defer to\n # left/right get_indexer, compare elementwise, equality -> match\n left_indexer = self.left.get_indexer(target_as_index.left)\n right_indexer = self.right.get_indexer(target_as_index.right)\n indexer = np.where(left_indexer == right_indexer, left_indexer, -1)\n elif is_categorical_dtype(target_as_index.dtype):\n target_as_index = cast(\"CategoricalIndex\", target_as_index)\n # get an indexer for unique categories then propagate to codes via take_1d\n categories_indexer = self.get_indexer(target_as_index.categories)\n indexer = take_1d(categories_indexer, target_as_index.codes, fill_value=-1)\n elif not is_object_dtype(target_as_index):\n # homogeneous scalar index: use IntervalTree\n target_as_index = self._maybe_convert_i8(target_as_index)\n indexer = self._engine.get_indexer(target_as_index.values)\n else:\n # heterogeneous scalar index: defer elementwise to get_loc\n # (non-overlapping so get_loc guarantees scalar of KeyError)\n indexer = []\n for key in target_as_index:\n try:\n loc = self.get_loc(key)\n except KeyError:\n loc = -1\n except InvalidIndexError as err:\n # i.e. 
non-scalar key\n raise TypeError(key) from err\n indexer.append(loc)\n\n return ensure_platform_int(indexer)\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(\n self, target: AnyArrayLike\n ) -> Tuple[np.ndarray, np.ndarray]:\n target_as_index = ensure_index(target)\n\n # check that target_as_index IntervalIndex is compatible\n if isinstance(target_as_index, IntervalIndex):\n common_subtype = find_common_type(\n [self.dtype.subtype, target_as_index.dtype.subtype]\n )\n if self.closed != target_as_index.closed or is_object_dtype(common_subtype):\n # different closed or incompatible subtype -> no matches\n return (\n np.repeat(-1, len(target_as_index)),\n np.arange(len(target_as_index)),\n )\n\n if is_object_dtype(target_as_index) or isinstance(\n target_as_index, IntervalIndex\n ):\n # target_as_index might contain intervals: defer elementwise to get_loc\n indexer, missing = [], []\n for i, key in enumerate(target_as_index):\n try:\n locs = self.get_loc(key)\n if isinstance(locs, slice):\n locs = np.arange(locs.start, locs.stop, locs.step, dtype=\"intp\")\n locs = np.array(locs, ndmin=1)\n except KeyError:\n missing.append(i)\n locs = np.array([-1])\n indexer.append(locs)\n indexer = np.concatenate(indexer)\n else:\n target_as_index = self._maybe_convert_i8(target_as_index)\n indexer, missing = self._engine.get_indexer_non_unique(\n target_as_index.values\n )\n\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:\n \"\"\"\n Guaranteed return of an indexer even when overlapping.\n\n This dispatches to get_indexer or get_indexer_non_unique\n as appropriate.\n\n Returns\n -------\n numpy.ndarray\n List of indices.\n \"\"\"\n if self.is_overlapping:\n return self.get_indexer_non_unique(target)[0]\n return self.get_indexer(target, **kwargs)\n\n def _convert_slice_indexer(self, key: slice, kind: str):\n if not (key.step is None or key.step == 1):\n # GH#31658 if label-based, we require step == 1,\n # if positional, we disallow float start/stop\n msg = \"label-based slicing with step!=1 is not supported for IntervalIndex\"\n if kind == \"loc\":\n raise ValueError(msg)\n elif kind == \"getitem\":\n if not is_valid_positional_slice(key):\n # i.e. this cannot be interpreted as a positional slice\n raise ValueError(msg)\n\n return super()._convert_slice_indexer(key, kind)\n\n @Appender(Index.where.__doc__)\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n values = np.where(cond, self._values, other)\n result = IntervalArray(values)\n return self._shallow_copy(result)\n\n def delete(self, loc):\n \"\"\"\n Return a new IntervalIndex with passed location(-s) deleted\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n new_left = self.left.delete(loc)\n new_right = self.right.delete(loc)\n result = self._data._shallow_copy(new_left, new_right)\n return self._shallow_copy(result)\n\n def insert(self, loc, item):\n \"\"\"\n Return a new IntervalIndex inserting new item at location. Follows\n Python list.append semantics for negative values. 
Only Interval\n objects and NA can be inserted into an IntervalIndex\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n if isinstance(item, Interval):\n if item.closed != self.closed:\n raise ValueError(\n \"inserted item must be closed on the same side as the index\"\n )\n left_insert = item.left\n right_insert = item.right\n elif is_scalar(item) and isna(item):\n # GH 18295\n left_insert = right_insert = item\n else:\n raise ValueError(\n \"can only insert Interval objects and NA into an IntervalIndex\"\n )\n\n new_left = self.left.insert(loc, left_insert)\n new_right = self.right.insert(loc, right_insert)\n result = self._data._shallow_copy(new_left, new_right)\n return self._shallow_copy(result)\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):\n result = self._data.take(\n indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs\n )\n return self._shallow_copy(result)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n # __repr__ associated methods are based on MultiIndex\n\n def _format_with_header(self, header: List[str], na_rep: str = \"NaN\") -> List[str]:\n return header + list(self._format_native_types(na_rep=na_rep))\n\n def _format_native_types(self, na_rep=\"NaN\", quoting=None, **kwargs):\n # GH 28210: use base method but with different default na_rep\n return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)\n\n def _format_data(self, name=None):\n\n # TODO: integrate with categorical and make generic\n # name argument is unused here; just for compat with base / categorical\n n = len(self)\n max_seq_items = min((get_option(\"display.max_seq_items\") or n) // 10, 10)\n\n formatter = str\n\n if n == 0:\n summary = \"[]\"\n elif n == 1:\n first = formatter(self[0])\n summary = f\"[{first}]\"\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = f\"[{first}, {last}]\"\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n head_joined = \", \".join(head)\n tail_joined = \", \".join(tail)\n summary = f\"[{head_joined} ... 
{tail_joined}]\"\n else:\n tail = [formatter(x) for x in self]\n joined = \", \".join(tail)\n summary = f\"[{joined}]\"\n\n return summary + \",\" + self._format_space()\n\n def _format_attrs(self):\n attrs = [(\"closed\", repr(self.closed))]\n if self.name is not None:\n attrs.append((\"name\", default_pprint(self.name)))\n attrs.append((\"dtype\", f\"'{self.dtype}'\"))\n return attrs\n\n def _format_space(self) -> str:\n space = \" \" * (len(type(self).__name__) + 1)\n return f\"\\n{space}\"\n\n # --------------------------------------------------------------------\n\n def argsort(self, *args, **kwargs) -> np.ndarray:\n return np.lexsort((self.right, self.left))\n\n def equals(self, other: object) -> bool:\n \"\"\"\n Determines if two IntervalIndex objects contain the same elements.\n \"\"\"\n if self.is_(other):\n return True\n\n # if we can coerce to an IntervalIndex then we can compare\n if not isinstance(other, IntervalIndex):\n if not is_interval_dtype(other):\n return False\n other = Index(other)\n if not isinstance(other, IntervalIndex):\n return False\n\n return (\n self.left.equals(other.left)\n and self.right.equals(other.right)\n and self.closed == other.closed\n )\n\n @Appender(Index.intersection.__doc__)\n @SetopCheck(op_name=\"intersection\")\n def intersection(\n self, other: \"IntervalIndex\", sort: bool = False\n ) -> \"IntervalIndex\":\n if self.left.is_unique and self.right.is_unique:\n taken = self._intersection_unique(other)\n elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:\n # Swap other/self if other is unique and self does not have\n # multiple NaNs\n taken = other._intersection_unique(self)\n else:\n # duplicates\n taken = self._intersection_non_unique(other)\n\n if sort is None:\n taken = taken.sort_values()\n\n return taken\n\n def _intersection_unique(self, other: \"IntervalIndex\") -> \"IntervalIndex\":\n \"\"\"\n Used when the IntervalIndex does not have any common endpoint,\n no mater left or right.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n lindexer = self.left.get_indexer(other.left)\n rindexer = self.right.get_indexer(other.right)\n\n match = (lindexer == rindexer) & (lindexer != -1)\n indexer = lindexer.take(match.nonzero()[0])\n\n return self.take(indexer)\n\n def _intersection_non_unique(self, other: \"IntervalIndex\") -> \"IntervalIndex\":\n \"\"\"\n Used when the IntervalIndex does have some common endpoints,\n on either sides.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n Returns\n -------\n IntervalIndex\n \"\"\"\n mask = np.zeros(len(self), dtype=bool)\n\n if self.hasnans and other.hasnans:\n first_nan_loc = np.arange(len(self))[self.isna()][0]\n mask[first_nan_loc] = True\n\n other_tups = set(zip(other.left, other.right))\n for i, tup in enumerate(zip(self.left, self.right)):\n if tup in other_tups:\n mask[i] = True\n\n return self[mask]\n\n def _setop(op_name: str, sort=None):\n @SetopCheck(op_name=op_name)\n def func(self, other, sort=sort):\n result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)\n result_name = get_op_result_name(self, other)\n\n # GH 19101: ensure empty results have correct dtype\n if result.empty:\n result = result._values.astype(self.dtype.subtype)\n else:\n result = result._values\n\n return type(self).from_tuples(result, closed=self.closed, name=result_name)\n\n return func\n\n @property\n def 
is_all_dates(self) -> bool:\n \"\"\"\n This is False even when left/right contain datetime-like objects,\n as the check is done on the Interval itself\n \"\"\"\n return False\n\n union = _setop(\"union\")\n difference = _setop(\"difference\")\n symmetric_difference = _setop(\"symmetric_difference\")\n\n # TODO: arithmetic operations\n\n # GH#30817 until IntervalArray implements inequalities, get them from Index\n def __lt__(self, other):\n return Index.__lt__(self, other)\n\n def __le__(self, other):\n return Index.__le__(self, other)\n\n def __gt__(self, other):\n return Index.__gt__(self, other)\n\n def __ge__(self, other):\n return Index.__ge__(self, other)\n\n\nIntervalIndex._add_logical_methods_disabled()\n\n\ndef _is_valid_endpoint(endpoint) -> bool:\n \"\"\"\n Helper for interval_range to check if start/end are valid types.\n \"\"\"\n return any(\n [\n is_number(endpoint),\n isinstance(endpoint, Timestamp),\n isinstance(endpoint, Timedelta),\n endpoint is None,\n ]\n )\n\n\ndef _is_type_compatible(a, b) -> bool:\n \"\"\"\n Helper for interval_range to check type compat of start/end/freq.\n \"\"\"\n is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))\n return (\n (is_number(a) and is_number(b))\n or (is_ts_compat(a) and is_ts_compat(b))\n or (is_td_compat(a) and is_td_compat(b))\n or com.any_none(a, b)\n )\n\n\ndef interval_range(\n start=None, end=None, periods=None, freq=None, name=None, closed=\"right\"\n):\n \"\"\"\n Return a fixed frequency IntervalIndex.\n\n Parameters\n ----------\n start : numeric or datetime-like, default None\n Left bound for generating intervals.\n end : numeric or datetime-like, default None\n Right bound for generating intervals.\n periods : int, default None\n Number of periods to generate.\n freq : numeric, str, or DateOffset, default None\n The length of each interval. Must be consistent with the type of start\n and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1\n for numeric and 'D' for datetime-like.\n name : str, default None\n Name of the resulting IntervalIndex.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n IntervalIndex\n\n See Also\n --------\n IntervalIndex : An Index of intervals that are all closed on the same side.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``IntervalIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end``, inclusively.\n\n To learn more about datetime-like frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Numeric ``start`` and ``end`` is supported.\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n closed='right', dtype='interval[int64]')\n\n Additionally, datetime-like input is also supported.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... end=pd.Timestamp('2017-01-04'))\n IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],\n (2017-01-03, 2017-01-04]],\n closed='right', dtype='interval[datetime64[ns]]')\n\n The ``freq`` parameter specifies the frequency between the left and right.\n endpoints of the individual intervals within the ``IntervalIndex``. 
For\n numeric ``start`` and ``end``, the frequency must also be numeric.\n\n >>> pd.interval_range(start=0, periods=4, freq=1.5)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n closed='right', dtype='interval[float64]')\n\n Similarly, for datetime-like ``start`` and ``end``, the frequency must be\n convertible to a DateOffset.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... periods=3, freq='MS')\n IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],\n (2017-03-01, 2017-04-01]],\n closed='right', dtype='interval[datetime64[ns]]')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.interval_range(start=0, end=6, periods=4)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n closed='right',\n dtype='interval[float64]')\n\n The ``closed`` parameter specifies which endpoints of the individual\n intervals within the ``IntervalIndex`` are closed.\n\n >>> pd.interval_range(end=5, periods=4, closed='both')\n IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],\n closed='both', dtype='interval[int64]')\n \"\"\"\n start = com.maybe_box_datetimelike(start)\n end = com.maybe_box_datetimelike(end)\n endpoint = start if start is not None else end\n\n if freq is None and com.any_none(periods, start, end):\n freq = 1 if is_number(endpoint) else \"D\"\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the four parameters: start, end, periods, and \"\n \"freq, exactly three must be specified\"\n )\n\n if not _is_valid_endpoint(start):\n raise ValueError(f\"start must be numeric or datetime-like, got {start}\")\n elif not _is_valid_endpoint(end):\n raise ValueError(f\"end must be numeric or datetime-like, got {end}\")\n\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods) and periods is not None:\n raise TypeError(f\"periods must be a number, got {periods}\")\n\n if freq is not None and not is_number(freq):\n try:\n freq = to_offset(freq)\n except ValueError as err:\n raise ValueError(\n f\"freq must be numeric or convertible to DateOffset, got {freq}\"\n ) from err\n\n # verify type compatibility\n if not all(\n [\n _is_type_compatible(start, end),\n _is_type_compatible(start, freq),\n _is_type_compatible(end, freq),\n ]\n ):\n raise TypeError(\"start, end, freq need to be type compatible\")\n\n # +1 to convert interval count to breaks count (n breaks = n-1 intervals)\n if periods is not None:\n periods += 1\n\n if is_number(endpoint):\n # force consistency between start/end/freq (lower end if freq skips it)\n if com.all_not_none(start, end, freq):\n end -= (end - start) % freq\n\n # compute the period/start/end if unspecified (at most one)\n if periods is None:\n periods = int((end - start) // freq) + 1\n elif start is None:\n start = end - (periods - 1) * freq\n elif end is None:\n end = start + (periods - 1) * freq\n\n breaks = np.linspace(start, end, periods)\n if all(is_integer(x) for x in com.not_none(start, end, freq)):\n # np.linspace always produces float output\n breaks = maybe_downcast_to_dtype(breaks, \"int64\")\n else:\n # delegate to the appropriate range function\n if isinstance(endpoint, Timestamp):\n range_func = date_range\n else:\n range_func = timedelta_range\n\n breaks = range_func(start=start, end=end, periods=periods, freq=freq)\n\n return IntervalIndex.from_breaks(breaks, name=name, closed=closed)\n"
] |
[
[
"numpy.linspace",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.indexes.base.Index.__le__",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.interval.IntervalTree",
"numpy.concatenate",
"pandas.core.indexes.base.Index",
"pandas._config.get_option",
"numpy.where",
"numpy.nextafter",
"pandas.core.indexes.extension.inherit_names",
"pandas._libs.tslibs.to_offset",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.common.all_not_none",
"pandas.core.common.not_none",
"numpy.arange",
"numpy.lexsort",
"pandas.core.common.any_none",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.dtypes.common.is_number",
"pandas.core.ops.get_op_result_name",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_float",
"pandas.core.arrays.interval.IntervalArray",
"pandas.util._exceptions.rewrite_exception",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.indexes.base.default_pprint",
"pandas.core.indexers.is_valid_positional_slice",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.errors.InvalidIndexError",
"pandas.core.common.maybe_box_datetimelike",
"pandas.core.indexes.base.ensure_index",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"pandas.core.indexes.base.Index.__lt__",
"pandas.core.indexes.base.Index.__gt__",
"numpy.timedelta64",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.arrays.interval.IntervalArray.from_tuples",
"numpy.array",
"pandas.core.arrays.interval.IntervalArray.from_arrays",
"pandas.core.indexes.base.Index.astype",
"pandas.core.common.count_not_none",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"numpy.intp",
"pandas.core.algorithms.take_1d",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.indexes.base.Index.__ge__",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.core.dtypes.missing.isna",
"pandas._libs.interval.IntervalMixin.__new__",
"pandas.core.arrays.interval.IntervalArray.from_breaks"
]
] |
jiangxunmu/ASP
|
[
"cb12bcd7bf30cc28c09b3d3c10a40fa5dc8c9dc7"
] |
[
"py/HW3/option_models/sabr.py"
] |
[
" # -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 10\n\n@author: jaehyuk\n\"\"\"\n\nimport numpy as np\nimport scipy.stats as ss\nimport scipy.optimize as sopt\nfrom . import normal\nfrom . import bsm\nimport pyfeng as pf\n\n'''\nMC model class for Beta=1\n'''\nclass ModelBsmMC:\n beta = 1.0 # fixed (not used)\n vov, rho = 0.0, 0.0\n sigma, intr, divr = None, None, None\n bsm_model = None\n '''\n You may define more members for MC: time step, etc\n '''\n \n def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0):\n self.sigma = sigma\n self.vov = vov\n self.rho = rho\n self.intr = intr\n self.divr = divr\n self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr)\n \n def bsm_vol(self, strike, spot, texp=None, sigma=None):\n ''''\n From the price from self.price() compute the implied vol\n this is the opposite of bsm_vol in ModelHagan class\n use bsm_model\n \n sigma = self.sigma if(sigma is None) else sigma\n texp = self.texp if(texp is None) else texp\n \n price = self.price(strike, spot, texp, sigm)\n bs = bsm.Model(texp, None, self.intr, self.divr)\n result = bs.impvol(price, strike, spot, texp, cp_sign=1)\n '''\n return 0\n \n def price(self, strike, spot, texp=None, sigma=None, cp=1):\n '''\n Your MC routine goes here\n Generate paths for vol and price first. Then get prices (vector) for all strikes\n You may fix the random number seed\n '''\n sigma = self.sigma if(sigma is None) else sigma\n texp = self.texp if(texp is None) else texp\n div_fac = np.exp(-texp*self.divr)\n disc_fac = np.exp(-texp*self.intr)\n forward = spot / disc_fac * div_fac\n \n np.random.seed(12345)\n \n step, M = 0.01, 10000\n N = int(texp / step)\n \n X = np.random.multivariate_normal([0,0], [[1, self.rho], [self.rho, 1]], (M,N), 'raise')\n W = X[:,:,0]\n Z = X[:,:,1]\n \n sigma_list = np.cumprod(np.exp(self.vov * np.sqrt(step) * Z - 0.5 * (self.vov ** 2) * step), axis=1) * self.sigma \n ## we will generate a waste sigma since S_k+1 is generated by sigma_k and W_k\n sigma_list = sigma_list[:,:-1]\n log_S_1 = np.log(forward) + self.sigma * W[:,0] * np.sqrt(step) - 0.5 * (self.sigma ** 2) * step\n stock_price_list = np.cumsum(sigma_list * W[:,1:] * np.sqrt(step) - 0.5 * sigma_list * sigma_list * step, axis=1) + log_S_1[:,None]\n price_list = np.fmax(np.exp(stock_price_list[:,-1])-strike[:,None],0)\n price = disc_fac * cp * np.mean(price_list,axis=1)\n \n return price\n\n'''\nMC model class for Beta=0\n'''\nclass ModelNormalMC:\n beta = 0.0 # fixed (not used)\n vov, rho = 0.0, 0.0\n sigma, intr, divr = None, None, None\n normal_model = None\n \n def __init__(self, sigma, vov=0, rho=0.0, beta=0.0, intr=0, divr=0):\n self.sigma = sigma\n self.vov = vov\n self.rho = rho\n self.intr = intr\n self.divr = divr\n self.normal_model = pf.Norm(sigma, intr=intr, divr=divr)\n \n def norm_vol(self, strike, spot, texp=None, sigma=None):\n ''''\n From the price from self.price() compute the implied vol\n this is the opposite of normal_vol in ModelNormalHagan class\n use normal_model \n '''\n return 0\n \n def price(self, strike, spot, texp=None, sigma=None, cp=1):\n '''\n Your MC routine goes here\n Generate paths for vol and price first. 
\n'''\nMC model class for Beta=0\n'''\nclass ModelNormalMC:\n beta = 0.0 # fixed (not used)\n vov, rho = 0.0, 0.0\n sigma, intr, divr = None, None, None\n normal_model = None\n \n def __init__(self, sigma, vov=0, rho=0.0, beta=0.0, intr=0, divr=0):\n self.sigma = sigma\n self.vov = vov\n self.rho = rho\n self.intr = intr\n self.divr = divr\n self.normal_model = pf.Norm(sigma, intr=intr, divr=divr)\n \n def norm_vol(self, strike, spot, texp=None, sigma=None):\n '''\n From the price from self.price() compute the implied vol\n this is the opposite of normal_vol in ModelNormalHagan class\n use normal_model\n '''\n return 0\n \n def price(self, strike, spot, texp=None, sigma=None, cp=1):\n '''\n Your MC routine goes here\n Generate paths for vol and price first. Then get prices (vector) for all strikes\n You may fix the random number seed\n '''\n sigma = self.sigma if(sigma is None) else sigma\n texp = self.texp if(texp is None) else texp\n \n div_fac = np.exp(-texp*self.divr)\n disc_fac = np.exp(-texp*self.intr)\n forward = spot / disc_fac * div_fac\n \n np.random.seed(12345)\n \n step, M = 0.01, 10000\n N = int(texp / step)\n \n X = np.random.multivariate_normal([0,0], [[1, self.rho], [self.rho, 1]], (M,N), 'raise')\n W = X[:,:,0]\n Z = X[:,:,1]\n \n sigma_list = np.cumprod(np.exp(self.vov * np.sqrt(step) * Z - 0.5 * (self.vov ** 2) * step), axis=1) * self.sigma\n ## the last sigma is unused since S_{k+1} is generated from sigma_k and W_k\n S_1 = forward + self.sigma * W[:,0] * np.sqrt(step)\n stock_price_list = np.cumsum(sigma_list[:,:-1] * W[:,1:] * np.sqrt(step), axis=1) + S_1[:,None]\n price_list = np.fmax(stock_price_list[:,-1]-strike[:,None],0)\n price = disc_fac * cp * np.mean(price_list, axis=1)\n \n return price\n\n'''\nConditional MC model class for Beta=1\n'''\nclass ModelBsmCondMC:\n beta = 1.0 # fixed (not used)\n vov, rho = 0.0, 0.0\n sigma, intr, divr = None, None, None\n bsm_model = None\n '''\n You may define more members for MC: time step, etc\n '''\n \n def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0):\n self.sigma = sigma\n self.vov = vov\n self.rho = rho\n self.intr = intr\n self.divr = divr\n self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr)\n \n def bsm_vol(self, strike, spot, texp=None):\n '''\n From the price from self.price() compute the implied vol\n this is the opposite of bsm_vol in ModelHagan class\n use bsm_model\n should be same as bsm_vol method in ModelBsmMC (just copy & paste)\n '''\n return 0\n
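\n # NOTE (editor): price() below uses conditional Monte Carlo: conditional\n # on the simulated volatility path, the beta=1 SABR spot is lognormal, so\n # each path is priced with the Black-Scholes formula using an effective\n # spot S_0 and volatility sigma_BS built from the integrated variance I_T.\n # Only the volatility path has to be simulated.\n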
 \n def price(self, strike, spot, texp=None, cp=1):\n '''\n Your MC routine goes here\n Generate paths for vol only. Then compute integrated variance and BSM price.\n Then get prices (vector) for all strikes\n You may fix the random number seed\n '''\n div_fac = np.exp(-texp*self.divr)\n disc_fac = np.exp(-texp*self.intr)\n forward = spot / disc_fac * div_fac\n \n np.random.seed(12345)\n \n step, M = 0.01, 10000\n N = int(texp / step)\n \n Z = np.random.normal(size=(M,N))\n \n sigma_list = np.cumprod(np.exp(self.vov * np.sqrt(step) * Z - 0.5 * (self.vov ** 2) * step), axis=1) * self.sigma\n # Simpson's rule for the integrated variance when N is even\n if N % 2 == 0:\n even = list(range(2,N,2))\n odd = list(range(1,N,2))\n I_T = (self.sigma**2 +sigma_list[:,-1]**2+2*np.sum(sigma_list[:,even]**2, axis=1)+4*np.sum(sigma_list[:,odd]**2, axis=1))/(3*N)/(self.sigma**2)\n else:\n I_T = np.mean(sigma_list ** 2, axis=1) / (self.sigma ** 2)\n S_0 = forward * np.exp(self.rho / self.vov * (sigma_list[:,-1] - self.sigma) - (self.rho * self.sigma) ** 2 * texp /2 * I_T)\n sigma_BS = self.sigma * np.sqrt((1-self.rho ** 2) * I_T)\n price_list = bsm.price(strike[:,None], S_0, texp, sigma_BS, intr=0.0, divr=0.0, cp_sign=cp)\n price = np.mean(price_list, axis=1)\n return price\n\n'''\nConditional MC model class for Beta=0\n'''\nclass ModelNormalCondMC:\n beta = 0.0 # fixed (not used)\n vov, rho = 0.0, 0.0\n sigma, intr, divr = None, None, None\n normal_model = None\n \n def __init__(self, sigma, vov=0, rho=0.0, beta=0.0, intr=0, divr=0):\n self.sigma = sigma\n self.vov = vov\n self.rho = rho\n self.intr = intr\n self.divr = divr\n self.normal_model = pf.Norm(sigma, intr=intr, divr=divr)\n \n def norm_vol(self, strike, spot, texp=None):\n '''\n From the price from self.price() compute the implied vol\n this is the opposite of normal_vol in ModelNormalHagan class\n use normal_model\n should be same as norm_vol method in ModelNormalMC (just copy & paste)\n '''\n return 0\n \n def price(self, strike, spot, texp=None, cp=1):\n '''\n Your MC routine goes here\n Generate paths for vol only. Then compute integrated variance and normal price.\n You may fix the random number seed\n '''\n div_fac = np.exp(-texp*self.divr)\n disc_fac = np.exp(-texp*self.intr)\n forward = spot / disc_fac * div_fac\n \n np.random.seed(12345)\n \n step, M = 0.01, 10000\n N = int(texp / step)\n \n Z = np.random.normal(size=(M,N))\n \n sigma_list = np.cumprod(np.exp(self.vov * np.sqrt(step) * Z - 0.5 * (self.vov ** 2) * step), axis=1) * self.sigma\n I_T = (np.mean(sigma_list ** 2, axis=1) + (self.sigma ** 2 - sigma_list[:,-1] ** 2)/ (2 * N)) / (self.sigma ** 2)\n S_0 = forward + self.rho / self.vov * (sigma_list[:,-1] - self.sigma)\n sigma_N = self.sigma * np.sqrt((1-self.rho ** 2) * I_T)\n price_list = normal.price(strike[:,None], S_0, texp, sigma_N, intr=0.0, divr=0.0, cp_sign=cp)\n price = np.mean(price_list, axis=1)\n return price"
] |
[
[
"numpy.log",
"numpy.sqrt",
"numpy.random.seed",
"numpy.random.multivariate_normal",
"numpy.random.normal",
"numpy.fmax",
"numpy.mean",
"numpy.exp",
"numpy.sum"
]
] |
jgrss/cultionet
|
[
"91d2104aecaec3c8baace55915cf3a74492a3540"
] |
[
"src/cultionet/scripts/cultionet.py"
] |
[
"#!/usr/bin/env python\n\nimport argparse\nimport typing as T\nimport logging\nfrom pathlib import Path\nfrom datetime import datetime\n\nimport cultionet\nfrom cultionet.data.datasets import EdgeDataset\nfrom cultionet.utils.project_paths import setup_paths\nfrom cultionet.utils.normalize import get_norm_values\nfrom cultionet.data.create import create_dataset\nfrom cultionet.utils import model_preprocessing\nfrom cultionet.data.utils import create_network_data, NetworkDataset\n\nimport torch\nimport geopandas as gpd\nimport yaml\n\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_AUGMENTATIONS = ['none', 'fliplr', 'flipud', 'flipfb',\n 'rot90', 'rot180', 'rot270',\n 'ts-warp', 'ts-noise', 'ts-drift']\n\n\ndef open_config(config_file: T.Union[str, Path, bytes]) -> dict:\n with open(config_file, 'r') as pf:\n config = yaml.load(pf, Loader=yaml.FullLoader)\n\n return config\n\n\ndef get_centroid_coords_from_image(vi_path: Path, dst_crs: T.Optional[str] = None) -> T.Tuple[float, float]:\n \"\"\"Gets the lon/lat or x/y coordinates of a centroid\n \"\"\"\n import geowombat as gw\n\n with gw.open(list(vi_path.glob('*.tif'))[0]) as src:\n df = src.gw.geodataframe\n centroid = df.to_crs(dst_crs).centroid\n\n return float(centroid.x), float(centroid.y)\n\n\ndef get_image_list(ppaths, region, config):\n image_list = []\n for image_vi in model_preprocessing.VegetationIndices(image_vis=config['image_vis']).image_vis:\n # Set the full path to the images\n if str(ppaths.image_path).endswith('time_series_vars'):\n vi_path = ppaths.image_path / region / image_vi\n else:\n vi_path = ppaths.image_path / region / 'brdf_ts' / 'ms' / image_vi\n\n if not vi_path.is_dir():\n logger.warning(f'{str(vi_path)} does not exist')\n continue\n\n # Get the centroid coordinates of the grid\n lon, lat = get_centroid_coords_from_image(vi_path, dst_crs='epsg:4326')\n # Get the image year\n file = list(vi_path.glob('*.tif'))[0]\n image_year = int(datetime.strptime(file.stem, '%Y%j').strftime('%Y')) + 1\n\n if lat > 0:\n start_date = '01-01'\n end_date = '01-01'\n else:\n start_date = '07-01'\n end_date = '07-01'\n\n # Get the requested time slice\n ts_list = model_preprocessing.get_time_series_list(\n vi_path, image_year, start_date, end_date\n )\n\n if len(ts_list) <= 1:\n continue\n\n image_list += ts_list\n\n return image_list\n\n\ndef predict_image(args):\n import geowombat as gw\n from geowombat.core.windows import get_window_offsets\n import numpy as np\n import rasterio as rio\n from rasterio.windows import Window\n import torch\n import yaml\n from tqdm.auto import tqdm\n\n logging.getLogger('pytorch_lightning').setLevel(logging.WARNING)\n\n config = open_config(args.config_file)\n\n # This is a helper function to manage paths\n ppaths = setup_paths(args.project_path, append_ts=True if args.append_ts == 'y' else False)\n\n # Load the z-score norm values\n data_values = torch.load(ppaths.norm_file)\n\n try:\n tmp = int(args.grid_id)\n region = f'{tmp:06d}'\n except:\n region = args.grid_id\n\n # Get the image list\n image_list = get_image_list(ppaths, region, config)\n\n with gw.open(\n image_list,\n stack_dim='band',\n band_names=list(range(1, len(image_list)+1))\n ) as src_ts:\n time_series = ((src_ts * args.gain + args.offset)\n .astype('float64')\n .clip(0, 1))\n # Get the image dimensions\n nvars = model_preprocessing.VegetationIndices(image_vis=config['image_vis']).n_vis\n nfeas, height, width = time_series.shape\n ntime = int(nfeas / nvars)\n # TODO: chunk size and padding\n windows = get_window_offsets(\n 
height,\n width,\n args.window_size,\n args.window_size,\n padding=(\n args.padding, args.padding, args.padding, args.padding\n )\n )\n\n profile = {\n 'crs': src_ts.crs,\n 'transform': src_ts.gw.transform,\n 'height': height,\n 'width': width,\n 'count': 4,\n 'dtype': 'uint16',\n 'blockxsize': 64 if 64 < width else width,\n 'blockysize': 64 if 64 < height else height,\n 'driver': 'GTiff'\n }\n\n # Create the output file\n with rio.open(args.out_path, mode='w', **profile) as dst:\n pass\n\n for w, w_pad in tqdm(windows, total=len(windows)):\n slc = (\n slice(0, None),\n slice(w_pad.row_off, w_pad.row_off+w_pad.height),\n slice(w_pad.col_off, w_pad.col_off+w_pad.width)\n )\n # Create the data for the chunk\n data = create_network_data(time_series[slc].data.compute(num_workers=8), ntime)\n # Create the temporary dataset\n net_ds = NetworkDataset(data, ppaths.predict_path, data_values)\n\n # Apply inference on the chunk\n stack, lit_model = cultionet.predict(\n predict_ds=net_ds.ds,\n ckpt_file=ppaths.ckpt_file,\n filters=args.filters,\n device=args.device,\n w=w,\n w_pad=w_pad\n )\n # Write the prediction stack to file\n with rio.open(args.out_path, mode='r+') as dst:\n dst.write(\n (stack*10000.0).clip(0, 10000).astype('uint16'),\n indexes=[1, 2, 3, 4],\n window=w\n )\n\n # Remove the temporary dataset\n net_ds.unlink()\n\n\ndef cycle_data(year_lists: list,\n regions_lists: list,\n project_path_lists: list,\n lc_paths_lists: list,\n ref_res_lists: list):\n for years, regions, project_path, lc_path, ref_res in zip(year_lists,\n regions_lists,\n project_path_lists,\n lc_paths_lists,\n ref_res_lists):\n for region in regions:\n for image_year in years:\n yield region, image_year, project_path, lc_path, ref_res\n\n\ndef get_centroid_coords(df: gpd.GeoDataFrame, dst_crs: T.Optional[str] = None) -> T.Tuple[float, float]:\n \"\"\"Gets the lon/lat or x/y coordinates of a centroid\n \"\"\"\n centroid = df.to_crs(dst_crs).centroid\n\n return float(centroid.x), float(centroid.y)\n\n\ndef persist_dataset(args):\n config = open_config(args.config_file)\n project_path_lists = [args.project_path]\n ref_res_lists = [args.ref_res]\n\n inputs = model_preprocessing.TrainInputs(\n regions=config['regions'],\n years=config['years'],\n lc_path=config['lc_path']\n )\n\n for region, image_year, project_path, lc_path, ref_res in cycle_data(\n inputs.year_lists,\n inputs.regions_lists,\n project_path_lists,\n inputs.lc_paths_lists,\n ref_res_lists\n ):\n ppaths = setup_paths(project_path, append_ts=True if args.append_ts == 'y' else False)\n\n try:\n tmp = int(region)\n region = f'{tmp:06d}'\n except:\n pass\n\n # Read the training data\n grids = ppaths.edge_training_path / f'{region}_grid_{image_year}.gpkg'\n edges = ppaths.edge_training_path / f'{region}_edges_{image_year}.gpkg'\n if not grids.is_file():\n logger.warning(f'{grids} does not exist.')\n continue\n if not edges.is_file():\n edges = ppaths.edge_training_path / f'{region}_poly_{image_year}.gpkg'\n df_grids = gpd.read_file(grids)\n df_edges = gpd.read_file(edges)\n\n image_list = []\n for image_vi in model_preprocessing.VegetationIndices(image_vis=config['image_vis']).image_vis:\n # Set the full path to the images\n if str(ppaths.image_path).endswith('time_series_vars'):\n vi_path = ppaths.image_path / region / image_vi\n else:\n vi_path = ppaths.image_path / region / 'brdf_ts' / 'ms' / image_vi\n\n if not vi_path.is_dir():\n logger.warning(f'{str(vi_path)} does not exist')\n continue\n\n # Get the centroid coordinates of the grid\n lon, lat = 
get_centroid_coords(df_grids.centroid, dst_crs='epsg:4326')\n\n if lat > 0:\n start_date = '01-01'\n end_date = '01-01'\n else:\n start_date = '07-01'\n end_date = '07-01'\n\n # Get the requested time slice\n ts_list = model_preprocessing.get_time_series_list(\n vi_path, image_year, start_date, end_date\n )\n\n if len(ts_list) <= 1:\n continue\n\n image_list += ts_list\n\n if image_list:\n if lc_path is None:\n lc_image = None\n else:\n if (Path(lc_path) / f'{image_year-1}_30m_cdls.tif').is_file():\n lc_image = str(Path(lc_path) / f'{image_year-1}_30m_cdls.tif')\n else:\n if not (Path(lc_path) / f'{image_year-1}_30m_cdls.img').is_file():\n continue\n lc_image = str(Path(lc_path) / f'{image_year-1}_30m_cdls.img')\n\n create_dataset(\n image_list,\n df_grids,\n df_edges,\n group_id=f'{region}_{image_year}',\n process_path=ppaths.process_path,\n transforms=args.transforms,\n ref_res=ref_res,\n resampling=args.resampling,\n num_workers=args.num_workers,\n grid_size=args.grid_size,\n lc_path=lc_image,\n n_ts=args.n_ts,\n data_type='boundaries'\n )\n\n\ndef train_model(args):\n # This is a helper function to manage paths\n ppaths = setup_paths(args.project_path)\n\n # Check dimensions\n ds = EdgeDataset(ppaths.train_path)\n ds.check_dims()\n # Get the normalization means and std. deviations on the train data\n cultionet.model.seed_everything(args.random_seed)\n train_ds, val_ds = ds.split_train_val(val_frac=args.val_frac)\n # Calculate the values needed to transform to z-scores, using\n # the training data\n data_values = get_norm_values(dataset=train_ds, batch_size=args.batch_size*4)\n torch.save(data_values, str(ppaths.norm_file))\n\n # Create the train data object again, this time passing\n # the means and standard deviation tensors\n ds = EdgeDataset(\n ppaths.train_path,\n data_means=data_values.mean,\n data_stds=data_values.std\n )\n\n # Fit the model\n cultionet.fit(\n dataset=ds,\n ckpt_file=ppaths.ckpt_file,\n val_frac=args.val_frac,\n batch_size=args.batch_size,\n epochs=args.epochs,\n learning_rate=args.learning_rate,\n filters=args.filters,\n random_seed=args.random_seed,\n reset_model=args.reset_model,\n auto_lr_find=args.auto_lr_find,\n device=args.device,\n gradient_clip_val=args.gradient_clip_val,\n early_stopping_patience=args.patience,\n stochastic_weight_avg=args.stochastic_weight_avg\n )\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Cultionet models',\n formatter_class=argparse.RawTextHelpFormatter,\n epilog=\"########\\n\"\n \"Examples\\n\"\n \"########\\n\\n\"\n \"# Create training data\\n\"\n \"cultionet create --project-path /projects/data \\n\\n\"\n \"# Train a model\\n\"\n \"cultionet train --project-path /projects/data \\n\\n\"\n \"# Apply inference over an image\\n\"\n \"cultionet predict --project-path /projects/data -o estimates.tif \\n\\n\"\n )\n\n subparsers = parser.add_subparsers(dest='process')\n available_processes = ['create', 'train', 'predict', 'version']\n for process in available_processes:\n subparser = subparsers.add_parser(process)\n\n if process == 'version':\n continue\n\n subparser.add_argument('-p', '--project-path', dest='project_path', help='The project path', default=None)\n\n if process == 'create':\n subparser.add_argument(\n '-n', '--num-workers', dest='num_workers',\n help='The number of CPUs for data creation (default: %(default)s)',\n default=4, type=int\n )\n subparser.add_argument(\n '-t', '--transforms', dest='transforms', help='Augmentation transforms (default: %(default)s)',\n default=DEFAULT_AUGMENTATIONS, 
choices=DEFAULT_AUGMENTATIONS, nargs='+'\n )\n subparser.add_argument(\n '--n-ts', dest='n_ts', help='The number of temporal augmentations (default: %(default)s)',\n default=6, type=int\n )\n subparser.add_argument(\n '-r', '--res', dest='ref_res', help='The cell resolution (default: %(default)s)', default=10.0,\n type=float\n )\n subparser.add_argument(\n '-rm', '--resampling', dest='resampling', help='The resampling method (default: %(default)s)',\n default='nearest'\n )\n subparser.add_argument(\n '-gs', '--grid-size', dest='grid_size',\n help='The grid size (*If not given, grid size is taken from the grid vector. If given, grid size '\n 'is taken from the upper left coordinate of the grid vector.) (default: %(default)s)',\n default=None, nargs='+', type=int\n )\n elif process == 'train':\n subparser.add_argument(\n '--val-frac', dest='val_frac', help='The validation fraction (default: %(default)s)',\n default=0.2, type=float\n )\n subparser.add_argument(\n '--random-seed', dest='random_seed', help='The random seed (default: %(default)s)',\n default=42, type=int\n )\n subparser.add_argument(\n '--batch-size', dest='batch_size', help='The batch size (default: %(default)s)',\n default=4, type=int\n )\n subparser.add_argument(\n '--epochs', dest='epochs', help='The number of training epochs (default: %(default)s)',\n default=30, type=int\n )\n subparser.add_argument(\n '--learning-rate', dest='learning_rate', help='The learning rate (default: %(default)s)',\n default=0.001, type=float\n )\n subparser.add_argument(\n '--reset-model', dest='reset_model', help='Whether to reset the model (default: %(default)s)',\n action='store_true'\n )\n subparser.add_argument(\n '--lr-find', dest='auto_lr_find', help='Whether to tune the learning rate (default: %(default)s)',\n action='store_true'\n )\n subparser.add_argument(\n '--gradient-clip-val', dest='gradient_clip_val', help='The gradient clip value (default: %(default)s)',\n default=0.1, type=float\n )\n subparser.add_argument(\n '--patience', dest='patience', help='The early stopping patience (default: %(default)s)',\n default=7, type=int\n )\n subparser.add_argument(\n '--apply-swa', dest='stochastic_weight_avg',\n help='Whether to apply stochastic weight averaging (default: %(default)s)',\n action='store_true'\n )\n elif process == 'predict':\n subparser.add_argument('-o', '--out-path', dest='out_path', help='The output path', default=None)\n subparser.add_argument('-g', '--grid-id', dest='grid_id', help='The grid id to process', default=None)\n subparser.add_argument(\n '-w', '--window-size', dest='window_size', help='The window size (default: %(default)s)',\n default=256, type=int\n )\n subparser.add_argument(\n '--padding', dest='padding', help='The window padding (default: %(default)s)',\n default=5, type=int\n )\n subparser.add_argument(\n '--gain', dest='gain', help='The image gain (default: %(default)s)', default=0.0001, type=float\n )\n subparser.add_argument(\n '--offset', dest='offset', help='The image offset (default: %(default)s)', default=0.0, type=float\n )\n if process in ['create', 'predict']:\n subparser.add_argument(\n '--append-ts',\n dest='append_ts',\n help='Whether to append time_series_vars to the image path (default: %(default)s)',\n default='y',\n choices=['y', 'n']\n )\n subparser.add_argument(\n '--config-file',\n dest='config_file',\n help='The configuration YAML file (default: %(default)s)',\n default=(Path('.') / 'config.yml').resolve()\n )\n if process in ['train', 'predict']:\n subparser.add_argument(\n 
'--filters', dest='filters', help='The number of base filters (default: %(default)s)', default=32,\n type=int\n )\n subparser.add_argument(\n '--device', dest='device', help='The device to train on (default: %(default)s)',\n default='gpu', choices=['cpu', 'gpu']\n )\n\n args = parser.parse_args()\n\n if args.process == 'version':\n print(cultionet.__version__)\n return\n\n if args.process == 'create':\n persist_dataset(args)\n elif args.process == 'train':\n train_model(args)\n elif args.process == 'predict':\n predict_image(args)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.load"
]
] |
sribaheti/aws-iot-analytics-demo
|
[
"113668d019756234548621aaa0814e0e0528bfd8"
] |
[
"notebooks/processing.py"
] |
[
"from __future__ import print_function\n\nimport time\nimport sys\nfrom io import StringIO\nimport os\nimport shutil\n\nimport argparse\nimport csv\nimport json\nimport numpy as np\nimport pandas as pd\nimport logging\n\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.externals import joblib\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder\n\nfrom sagemaker_containers.beta.framework import (\n content_types, encoders, env, modules, transformer, worker)\n\n# Since we get a headerless CSV file we specify the column names here.\nfeature_columns_names = [\n 'type',\n 'note',\n 'med',\n \"doses\",\n 'preemp',\n 'symptoms',\n 'triggers',\n 'activitymonth',\n 'activitydate',\n 'activityyear',\n 'patientid',\n 'locationid',\n 'condition',\n 'status'\n ] \n\nlabel_column = 'Churn?'\n\nfeature_columns_dtype = {\n 'type' : str,\n 'note' : str,\n 'med' : str,\n \"doses\" : np.int64,\n 'preemp' : str,\n 'symptoms' : str,\n 'triggers' : str,\n 'activitymonth' : str,\n 'acvtivitydate' : np.int64,\n 'activityyear' : np.int64,\n 'patientid' : np.int64,\n 'locationid' : np.int64,\n 'condition' : str,\n 'status' : str}\n\nlabel_column_dtype = {'Churn?': str} \n\ndef merge_two_dicts(x, y):\n z = x.copy() # start with x's keys and values\n z.update(y) # modifies z with y's keys and values & returns None\n return z\n\ndef _is_inverse_label_transform():\n \"\"\"Returns True if if it's running in inverse label transform.\"\"\"\n return os.getenv('TRANSFORM_MODE') == 'inverse-label-transform'\n\ndef _is_feature_transform():\n \"\"\"Returns True if it's running in feature transform mode.\"\"\"\n return os.getenv('TRANSFORM_MODE') == 'feature-transform'\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n # Sagemaker specific arguments. 
Defaults are set in the environment variables.\n parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])\n\n\n args = parser.parse_args()\n\n input_files = [ os.path.join(args.train, file) for file in os.listdir(args.train) ]\n if len(input_files) == 0:\n raise ValueError(('There are no files in {}.\\n' +\n 'This usually indicates that the channel ({}) was incorrectly specified,\\n' +\n 'the data specification in S3 was incorrectly specified or the role specified\\n' +\n 'does not have permission to access the data.').format(args.train, \"train\"))\n\n raw_data = [ pd.read_csv(\n file, \n header=None, \n names=feature_columns_names + [label_column],\n dtype=merge_two_dicts(feature_columns_dtype, label_column_dtype)) for file in input_files ]\n concat_data = pd.concat(raw_data)\n\n numeric_features = list([\n 'doses',\n 'patientid',\n 'locationid',\n 'activitydate',\n 'activityyear'])\n\n\n numeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])\n\n categorical_features = ['type','note',\"med\",'preemp','symptoms','activitymonth','condition','status']\n categorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features)],\n remainder=\"drop\")\n\n preprocessor.fit(concat_data)\n\n joblib.dump(preprocessor, os.path.join(args.model_dir, \"model.joblib\"))\n\n print(\"saved model!\")\n \n \ndef input_fn(input_data, request_content_type):\n \"\"\"Parse input data payload\n \n We currently only take csv input. 
Since we need to process both labelled\n and unlabelled data we first determine whether the label column is present\n by looking at how many columns were provided.\n The active branch is selected via the TRANSFORM_MODE environment variable.\n \"\"\"\n \n \n content_type = request_content_type.lower(\n ) if request_content_type else \"text/csv\"\n content_type = content_type.split(\";\")[0].strip()\n \n \n if isinstance(input_data, str):\n str_buffer = input_data\n else:\n str_buffer = str(input_data,'utf-8')\n \n\n if _is_feature_transform():\n if content_type == 'text/csv':\n # Read the raw input data as CSV.\n df = pd.read_csv(StringIO(str_buffer), header=None)\n if len(df.columns) == len(feature_columns_names) + 1:\n # This is a labelled example, includes the label\n df.columns = feature_columns_names + [label_column]\n elif len(df.columns) == len(feature_columns_names):\n # This is an unlabelled example.\n df.columns = feature_columns_names\n return df\n else:\n raise ValueError(\"{} not supported by script!\".format(content_type))\n \n \n if _is_inverse_label_transform():\n if (content_type == 'text/csv' or content_type == 'text/csv; charset=utf-8'):\n # Read the raw input data as CSV.\n df = pd.read_csv(StringIO(str_buffer), header=None)\n logging.info(f\"Shape of the requested data: '{df.shape}'\")\n return df\n else:\n raise ValueError(\"{} not supported by script!\".format(content_type))\n \n \ndef output_fn(prediction, accept):\n \"\"\"Format prediction output\n \n The default accept/content-type between containers for serial inference is JSON.\n We also want to set the ContentType or mimetype as the same value as accept so the next\n container can read the response payload correctly.\n \"\"\"\n \n # The accept type is forced to CSV here, so the JSON branch below is\n # effectively unused.\n accept = 'text/csv'\n if type(prediction) is not np.ndarray:\n prediction=prediction.toarray()\n \n \n if accept == \"application/json\":\n instances = []\n for row in prediction.tolist():\n instances.append({\"features\": row})\n\n json_output = {\"instances\": instances}\n\n return worker.Response(json.dumps(json_output), mimetype=accept)\n elif accept == 'text/csv':\n return worker.Response(encoders.encode(prediction, accept), mimetype=accept)\n else:\n raise RuntimeError(\"{} accept type is not supported by this script.\".format(accept))\n\n\ndef predict_fn(input_data, model):\n \"\"\"Preprocess input data\n \n We implement this because the default predict_fn uses .predict(), but our model is a preprocessor\n so we want to use .transform().\n The output is returned in the following order:\n \n rest of features either one hot encoded or standardized\n \"\"\"\n\n \n if _is_feature_transform():\n features = model.transform(input_data)\n\n\n if label_column in input_data:\n # Return the label (as the first column) and the set of features.\n return np.insert(features, 0, pd.get_dummies(input_data[label_column])['True.'], axis=1)\n else:\n # Return only the set of features\n return features\n \n if _is_inverse_label_transform():\n features = input_data.iloc[:,0]>0.5\n features = features.values\n return features\n \n\ndef model_fn(model_dir):\n \"\"\"Deserialize fitted model\n \"\"\"\n if _is_feature_transform():\n preprocessor = joblib.load(os.path.join(model_dir, \"model.joblib\"))\n return preprocessor\n"
] |
[
[
"pandas.concat",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.impute.SimpleImputer",
"sklearn.preprocessing.StandardScaler",
"pandas.get_dummies",
"sklearn.compose.ColumnTransformer"
]
] |
Javadzb/models
|
[
"5fff97264564bf132c20b24dc94b9fdbe6afd56e"
] |
[
"research/object_detection/model_lib.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Constructs model, inputs, and training environment.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport functools\nimport os\n\nimport pdb\n\nimport tensorflow as tf\n\nfrom object_detection import eval_util\nfrom object_detection import exporter as exporter_lib\nfrom object_detection import inputs\nfrom object_detection.builders import graph_rewriter_builder\nfrom object_detection.builders import model_builder\nfrom object_detection.builders import optimizer_builder\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.utils import config_util\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import shape_utils\nfrom object_detection.utils import variables_helper\nfrom object_detection.utils import visualization_utils as vis_utils\n\n# A map of names to methods that help build the model.\nMODEL_BUILD_UTIL_MAP = {\n 'get_configs_from_pipeline_file':\n config_util.get_configs_from_pipeline_file,\n 'create_pipeline_proto_from_configs':\n config_util.create_pipeline_proto_from_configs,\n 'merge_external_params_with_configs':\n config_util.merge_external_params_with_configs,\n 'create_train_input_fn':\n inputs.create_train_input_fn,\n 'create_eval_input_fn':\n inputs.create_eval_input_fn,\n 'create_predict_input_fn':\n inputs.create_predict_input_fn,\n}\n\n\ndef _prepare_groundtruth_for_eval(detection_model, class_agnostic,\n max_number_of_boxes):\n \"\"\"Extracts groundtruth data from detection_model and prepares it for eval.\n\n Args:\n detection_model: A `DetectionModel` object.\n class_agnostic: Whether the detections are class_agnostic.\n max_number_of_boxes: Max number of groundtruth boxes.\n\n Returns:\n A tuple of:\n groundtruth: Dictionary with the following fields:\n 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,\n in normalized coordinates.\n 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed\n classes.\n 'groundtruth_masks': 4D float32 tensor of instance masks (if provided in\n groundtruth)\n 'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating\n is_crowd annotations (if provided in groundtruth).\n 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number\n of groundtruth boxes per image..\n class_agnostic: Boolean indicating whether detections are class agnostic.\n \"\"\"\n input_data_fields = fields.InputDataFields()\n groundtruth_boxes = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.boxes))\n groundtruth_boxes_shape = tf.shape(groundtruth_boxes)\n # For class-agnostic models, groundtruth one-hot encodings collapse to all\n # ones.\n if class_agnostic:\n groundtruth_classes_one_hot = tf.ones(\n [groundtruth_boxes_shape[0], 
groundtruth_boxes_shape[1], 1])\n else:\n groundtruth_classes_one_hot = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.classes))\n label_id_offset = 1 # Applying label id offset (b/63711816)\n groundtruth_classes = (\n tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)\n groundtruth = {\n input_data_fields.groundtruth_boxes: groundtruth_boxes,\n input_data_fields.groundtruth_classes: groundtruth_classes\n }\n if detection_model.groundtruth_has_field(fields.BoxListFields.masks):\n groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.masks))\n\n if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):\n groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))\n\n groundtruth[input_data_fields.num_groundtruth_boxes] = (\n tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))\n return groundtruth\n\n\ndef unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):\n \"\"\"Unstacks all tensors in `tensor_dict` along 0th dimension.\n\n Unstacks tensor from the tensor dict along 0th dimension and returns a\n tensor_dict containing values that are lists of unstacked, unpadded tensors.\n\n Tensors in the `tensor_dict` are expected to be of one of the three shapes:\n 1. [batch_size]\n 2. [batch_size, height, width, channels]\n 3. [batch_size, num_boxes, d1, d2, ... dn]\n\n When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3\n above are sliced along the `num_boxes` dimension using the value in tensor\n field.InputDataFields.num_groundtruth_boxes.\n\n Note that this function has a static list of input data fields and has to be\n kept in sync with the InputDataFields defined in core/standard_fields.py\n\n Args:\n tensor_dict: A dictionary of batched groundtruth tensors.\n unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`\n dimension of the groundtruth tensors.\n\n Returns:\n A dictionary where the keys are from fields.InputDataFields and values are\n a list of unstacked (optionally unpadded) tensors.\n\n Raises:\n ValueError: If unpad_tensors is True and `tensor_dict` does not contain\n `num_groundtruth_boxes` tensor.\n \"\"\"\n unbatched_tensor_dict = {\n key: tf.unstack(tensor) for key, tensor in tensor_dict.items()\n }\n if unpad_groundtruth_tensors:\n if (fields.InputDataFields.num_groundtruth_boxes not in\n unbatched_tensor_dict):\n raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '\n 'Keys available: {}'.format(\n unbatched_tensor_dict.keys()))\n unbatched_unpadded_tensor_dict = {}\n unpad_keys = set([\n # List of input data fields that are padded along the num_boxes\n # dimension. 
This list has to be kept in sync with InputDataFields in\n # standard_fields.py.\n fields.InputDataFields.groundtruth_instance_masks,\n fields.InputDataFields.groundtruth_classes,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_keypoints,\n fields.InputDataFields.groundtruth_group_of,\n fields.InputDataFields.groundtruth_difficult,\n fields.InputDataFields.groundtruth_is_crowd,\n fields.InputDataFields.groundtruth_area,\n fields.InputDataFields.groundtruth_weights\n ]).intersection(set(unbatched_tensor_dict.keys()))\n\n for key in unpad_keys:\n unpadded_tensor_list = []\n for num_gt, padded_tensor in zip(\n unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],\n unbatched_tensor_dict[key]):\n tensor_shape = shape_utils.combined_static_and_dynamic_shape(\n padded_tensor)\n slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)\n slice_size = tf.stack(\n [num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])\n unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)\n unpadded_tensor_list.append(unpadded_tensor)\n unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list\n unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)\n\n return unbatched_tensor_dict\n\n\ndef create_model_fn(detection_model_fn, configs, hparams, use_tpu=False):\n \"\"\"Creates a model function for `Estimator`.\n\n Args:\n detection_model_fn: Function that returns a `DetectionModel` instance.\n configs: Dictionary of pipeline config objects.\n hparams: `HParams` object.\n use_tpu: Boolean indicating whether model should be constructed for\n use on TPU.\n\n Returns:\n `model_fn` for `Estimator`.\n \"\"\"\n train_config = configs['train_config']\n eval_input_config = configs['eval_input_config']\n eval_config = configs['eval_config']\n\n def model_fn(features, labels, mode, params=None):\n \"\"\"Constructs the object detection model.\n\n Args:\n features: Dictionary of feature tensors, returned from `input_fn`.\n labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,\n otherwise None.\n mode: Mode key from tf.estimator.ModeKeys.\n params: Parameter dictionary passed from the estimator.\n\n Returns:\n An `EstimatorSpec` that encapsulates the model and its serving\n configurations.\n \"\"\"\n params = params or {}\n total_loss, train_op, detections, export_outputs = None, None, None, None\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n # Make sure to set the Keras learning phase. 
True during training,\n # False for inference.\n tf.keras.backend.set_learning_phase(is_training)\n detection_model = detection_model_fn(\n is_training=is_training, add_summaries=(not use_tpu))\n scaffold_fn = None\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n labels = unstack_batch(\n labels,\n unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)\n elif mode == tf.estimator.ModeKeys.EVAL:\n # For evaling on train data, it is necessary to check whether groundtruth\n # must be unpadded.\n boxes_shape = (\n labels[fields.InputDataFields.groundtruth_boxes].get_shape()\n .as_list())\n unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu\n labels = unstack_batch(\n labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)\n\n if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):\n gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]\n gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]\n gt_masks_list = None\n if fields.InputDataFields.groundtruth_instance_masks in labels:\n gt_masks_list = labels[\n fields.InputDataFields.groundtruth_instance_masks]\n gt_keypoints_list = None\n if fields.InputDataFields.groundtruth_keypoints in labels:\n gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]\n gt_weights_list = None\n if fields.InputDataFields.groundtruth_weights in labels:\n gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]\n gt_confidences_list = None\n if fields.InputDataFields.groundtruth_confidences in labels:\n gt_confidences_list = labels[\n fields.InputDataFields.groundtruth_confidences]\n gt_is_crowd_list = None\n if fields.InputDataFields.groundtruth_is_crowd in labels:\n gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]\n detection_model.provide_groundtruth(\n groundtruth_boxes_list=gt_boxes_list,\n groundtruth_classes_list=gt_classes_list,\n groundtruth_confidences_list=gt_confidences_list,\n groundtruth_masks_list=gt_masks_list,\n groundtruth_keypoints_list=gt_keypoints_list,\n groundtruth_weights_list=gt_weights_list,\n groundtruth_is_crowd_list=gt_is_crowd_list)\n\n preprocessed_images = features[fields.InputDataFields.image]\n if use_tpu and train_config.use_bfloat16:\n with tf.contrib.tpu.bfloat16_scope():\n prediction_dict = detection_model.predict(\n preprocessed_images,\n features[fields.InputDataFields.true_image_shape])\n for k, v in prediction_dict.items():\n if v.dtype == tf.bfloat16:\n prediction_dict[k] = tf.cast(v, tf.float32)\n else:\n prediction_dict = detection_model.predict(\n preprocessed_images,\n features[fields.InputDataFields.true_image_shape])\n if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):\n detections = detection_model.postprocess(\n prediction_dict, features[fields.InputDataFields.true_image_shape])\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n if train_config.fine_tune_checkpoint and hparams.load_pretrained:\n if not train_config.fine_tune_checkpoint_type:\n # train_config.from_detection_checkpoint field is deprecated. 
For\n # backward compatibility, set train_config.fine_tune_checkpoint_type\n # based on train_config.from_detection_checkpoint.\n if train_config.from_detection_checkpoint:\n train_config.fine_tune_checkpoint_type = 'detection'\n else:\n train_config.fine_tune_checkpoint_type = 'classification'\n asg_map = detection_model.restore_map(\n fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,\n load_all_detection_checkpoint_vars=(\n train_config.load_all_detection_checkpoint_vars))\n available_var_map = (\n variables_helper.get_variables_available_in_checkpoint(\n asg_map,\n train_config.fine_tune_checkpoint,\n include_global_step=False))\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,\n available_var_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,\n available_var_map)\n\n if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):\n losses_dict = detection_model.loss(\n prediction_dict, features[fields.InputDataFields.true_image_shape])\n losses = [loss_tensor for loss_tensor in losses_dict.values()]\n if train_config.add_regularization_loss:\n regularization_losses = detection_model.regularization_losses()\n if regularization_losses:\n regularization_loss = tf.add_n(\n regularization_losses, name='regularization_loss')\n losses.append(regularization_loss)\n losses_dict['Loss/regularization_loss'] = regularization_loss\n total_loss = tf.add_n(losses, name='total_loss')\n losses_dict['Loss/total_loss'] = total_loss\n\n if 'graph_rewriter_config' in configs:\n graph_rewriter_fn = graph_rewriter_builder.build(\n configs['graph_rewriter_config'], is_training=is_training)\n graph_rewriter_fn()\n\n # TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we\n # can write learning rate summaries on TPU without host calls.\n global_step = tf.train.get_or_create_global_step()\n training_optimizer, optimizer_summary_vars = optimizer_builder.build(\n train_config.optimizer)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n if use_tpu:\n training_optimizer = tf.contrib.tpu.CrossShardOptimizer(\n training_optimizer)\n\n # Optionally freeze some layers by setting their gradients to be zero.\n trainable_variables = None\n include_variables = (\n train_config.update_trainable_variables\n if train_config.update_trainable_variables else None)\n exclude_variables = (\n train_config.freeze_variables\n if train_config.freeze_variables else None)\n trainable_variables = tf.contrib.framework.filter_variables(\n tf.trainable_variables(),\n include_patterns=include_variables,\n exclude_patterns=exclude_variables)\n\n clip_gradients_value = None\n if train_config.gradient_clipping_by_norm > 0:\n clip_gradients_value = train_config.gradient_clipping_by_norm\n\n if not use_tpu:\n for var in optimizer_summary_vars:\n tf.summary.scalar(var.op.name, var)\n summaries = [] if use_tpu else None\n if train_config.summarize_gradients:\n summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']\n train_op = tf.contrib.layers.optimize_loss(\n loss=total_loss,\n global_step=global_step,\n learning_rate=None,\n clip_gradients=clip_gradients_value,\n optimizer=training_optimizer,\n update_ops=detection_model.updates(),\n variables=trainable_variables,\n summaries=summaries,\n name='') # Preventing scope prefix on all variables.\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n exported_output = exporter_lib.add_output_tensor_nodes(detections)\n 
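# NOTE (editor): the postprocessed detections are wrapped in a PredictOutput\n # so the exported SavedModel serves them under the default predict signature.\n 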
export_outputs = {\n tf.saved_model.signature_constants.PREDICT_METHOD_NAME:\n tf.estimator.export.PredictOutput(exported_output)\n }\n\n eval_metric_ops = None\n scaffold = None\n if mode == tf.estimator.ModeKeys.EVAL:\n class_agnostic = (\n fields.DetectionResultFields.detection_classes not in detections)\n groundtruth = _prepare_groundtruth_for_eval(\n detection_model, class_agnostic,\n eval_input_config.max_number_of_boxes)\n use_original_images = fields.InputDataFields.original_image in features\n if use_original_images:\n eval_images = features[fields.InputDataFields.original_image]\n true_image_shapes = tf.slice(\n features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])\n original_image_spatial_shapes = features[fields.InputDataFields\n .original_image_spatial_shape]\n else:\n eval_images = features[fields.InputDataFields.image]\n true_image_shapes = None\n original_image_spatial_shapes = None\n\n eval_dict = eval_util.result_dict_for_batched_example(\n eval_images,\n features[inputs.HASH_KEY],\n detections,\n groundtruth,\n class_agnostic=class_agnostic,\n scale_to_absolute=True,\n original_image_spatial_shapes=original_image_spatial_shapes,\n true_image_shapes=true_image_shapes)\n\n\n#================================================================================================\n\n # Hey Javad !\n\n #pdb.set_trace()\n\n\n#================================================================================================\n if class_agnostic:\n category_index = label_map_util.create_class_agnostic_category_index()\n else:\n category_index = label_map_util.create_category_index_from_labelmap(\n eval_input_config.label_map_path)\n vis_metric_ops = None\n if not use_tpu and use_original_images:\n eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(\n category_index,\n max_examples_to_draw=eval_config.num_visualizations,\n max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,\n min_score_thresh=eval_config.min_score_threshold,\n use_normalized_coordinates=False)\n vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(\n eval_dict)\n\n # Eval metrics on a single example.\n eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(\n eval_config, category_index.values(), eval_dict)\n for loss_key, loss_tensor in iter(losses_dict.items()):\n eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)\n for var in optimizer_summary_vars:\n eval_metric_ops[var.op.name] = (var, tf.no_op())\n if vis_metric_ops is not None:\n eval_metric_ops.update(vis_metric_ops)\n eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}\n\n if eval_config.use_moving_averages:\n variable_averages = tf.train.ExponentialMovingAverage(0.0)\n variables_to_restore = variable_averages.variables_to_restore()\n keep_checkpoint_every_n_hours = (\n train_config.keep_checkpoint_every_n_hours)\n saver = tf.train.Saver(\n variables_to_restore,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)\n scaffold = tf.train.Scaffold(saver=saver)\n\n # EVAL executes on CPU, so use regular non-TPU EstimatorSpec.\n if use_tpu and mode != tf.estimator.ModeKeys.EVAL:\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n scaffold_fn=scaffold_fn,\n predictions=detections,\n loss=total_loss,\n train_op=train_op,\n eval_metrics=eval_metric_ops,\n export_outputs=export_outputs)\n else:\n if scaffold is None:\n keep_checkpoint_every_n_hours = (\n train_config.keep_checkpoint_every_n_hours)\n saver = tf.train.Saver(\n sharded=True,\n 
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,\n save_relative_paths=True)\n tf.add_to_collection(tf.GraphKeys.SAVERS, saver)\n scaffold = tf.train.Scaffold(saver=saver)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=detections,\n loss=total_loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=export_outputs,\n scaffold=scaffold)\n\n return model_fn\n\n\ndef create_estimator_and_inputs(Unlabeled_set_length,\n\t\t\t\tActive_set_length,\n\t\t\t\tepochs,\n\t\t data_info,\n\t\t\t\tFLAGS,\n\t\t\t\trestart_cycle,\n\t\t\t\trun_config,\n hparams,\n pipeline_config_path,\n config_override=None,\n train_steps=None,\n sample_1_of_n_eval_examples=1,\n sample_1_of_n_eval_on_train_examples=1,\n model_fn_creator=create_model_fn,\n use_tpu_estimator=False,\n use_tpu=False,\n num_shards=1,\n params=None,\n override_eval_num_epochs=True,\n save_final_config=False,\n **kwargs):\n \"\"\"Creates `Estimator`, input functions, and steps.\n\n Args:\n run_config: A `RunConfig`.\n hparams: A `HParams`.\n pipeline_config_path: A path to a pipeline config file.\n config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to\n override the config from `pipeline_config_path`.\n train_steps: Number of training steps. If None, the number of training steps\n is set from the `TrainConfig` proto.\n sample_1_of_n_eval_examples: Integer representing how often an eval example\n should be sampled. If 1, will sample all examples.\n sample_1_of_n_eval_on_train_examples: Similar to\n `sample_1_of_n_eval_examples`, except controls the sampling of training\n data for evaluation.\n model_fn_creator: A function that creates a `model_fn` for `Estimator`.\n Follows the signature:\n\n * Args:\n * `detection_model_fn`: Function that returns `DetectionModel` instance.\n * `configs`: Dictionary of pipeline config objects.\n * `hparams`: `HParams` object.\n * Returns:\n `model_fn` for `Estimator`.\n\n use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,\n an `Estimator` will be returned.\n use_tpu: Boolean, whether training and evaluation should run on TPU. Only\n used if `use_tpu_estimator` is True.\n num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`\n is True.\n params: Parameter dictionary passed from the estimator. Only used if\n `use_tpu_estimator` is True.\n override_eval_num_epochs: Whether to overwrite the number of epochs to\n 1 for eval_input.\n save_final_config: Whether to save final config (obtained after applying\n overrides) to `estimator.model_dir`.\n **kwargs: Additional keyword arguments for configuration override.\n\n Returns:\n A dictionary with the following fields:\n 'estimator': An `Estimator` or `TPUEstimator`.\n 'train_input_fn': A training input function.\n 'eval_input_fns': A list of all evaluation input functions.\n 'eval_input_names': A list of names for each evaluation input.\n 'eval_on_train_input_fn': An evaluation-on-train input function.\n 'predict_input_fn': A prediction input function.\n 'train_steps': Number of training steps. 
Either directly from input or from\n configuration.\n \"\"\"\n get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[\n 'get_configs_from_pipeline_file']\n merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[\n 'merge_external_params_with_configs']\n create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[\n 'create_pipeline_proto_from_configs']\n create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']\n create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']\n create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']\n\n configs = get_configs_from_pipeline_file(pipeline_config_path,\n config_override=config_override)\n\n kwargs.update({\n 'train_steps': train_steps,\n 'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples\n })\n\n\n if override_eval_num_epochs:\n kwargs.update({'eval_num_epochs': 1})\n tf.logging.warning(\n 'Forced number of epochs for all eval validations to be 1.')\n configs = merge_external_params_with_configs(\n configs, hparams, kwargs_dict=kwargs)\n\n model_config = configs['model']\n train_config = configs['train_config']\n train_input_config = configs['train_input_config']\n eval_config = configs['eval_config']\n eval_input_configs = configs['eval_input_configs']\n eval_on_train_input_config = copy.deepcopy(train_input_config)\n eval_on_train_input_config.sample_1_of_n_examples = (\n sample_1_of_n_eval_on_train_examples)\n\n#============================================================================================\n\n # Overriding variables in proto file \n\n # TRAINING SETTING \n # Load active set from cycle 0 and point to right model\n if restart_cycle==0:\n model_dir = FLAGS.model_dir + 'R' + FLAGS.run + 'cycle0/'\n train_config.fine_tune_checkpoint = model_dir + 'model.ckpt'\n else:\n model_dir = FLAGS.model_dir + name + 'R' + FLAGS.run + 'cycle' + str(restart_cycle) + '/'\n # Get actual checkpoint model\n with open(model_dir+'checkpoint','r') as cfile:\n line = cfile.readlines()\n train_config.fine_tune_checkpoint = line[0].split(' ')[1][1:-2]\n\n input_config = configs['train_input_config']\n input_config.tf_record_input_reader.input_path[0] = data_info['output_path']\n # Set number of steps based on epochs\n train_config.num_steps = epochs*Active_set_length\n # Reducing learning\t\n train_config.optimizer.momentum_optimizer.learning_rate.manual_step_learning_rate.schedule[0].step= int(0.5*epochs*Active_set_length)\n train_config.optimizer.momentum_optimizer.learning_rate.manual_step_learning_rate.schedule[1].step= int(0.75*epochs*Active_set_length)\n\n\n #EVALUATION SETTING\n eval_input_configs[0].tf_record_input_reader.input_path[0] = data_info['output_path']\n eval_config.num_examples = Unlabeled_set_length\n\n\n\n#============================================================================================\n\n\n if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:\n tf.logging.warning('Expected number of evaluation epochs is 1, but '\n 'instead encountered `eval_on_train_input_config'\n '.num_epochs` = '\n '{}. 
Overwriting `num_epochs` to 1.'.format(\n eval_on_train_input_config.num_epochs))\n eval_on_train_input_config.num_epochs = 1\n\n # update train_steps from config but only when non-zero value is provided\n if train_steps is None and train_config.num_steps != 0:\n train_steps = train_config.num_steps\n\n detection_model_fn = functools.partial(\n model_builder.build, model_config=model_config)\n\n # Create the input functions for TRAIN/EVAL/PREDICT.\n train_input_fn = create_train_input_fn(\n train_config=train_config,\n train_input_config=train_input_config,\n model_config=model_config)\n eval_input_fns = [\n create_eval_input_fn(\n eval_config=eval_config,\n eval_input_config=eval_input_config,\n model_config=model_config) for eval_input_config in eval_input_configs\n ]\n eval_input_names = [\n eval_input_config.name for eval_input_config in eval_input_configs\n ]\n eval_on_train_input_fn = create_eval_input_fn(\n eval_config=eval_config,\n eval_input_config=eval_on_train_input_config,\n model_config=model_config)\n predict_input_fn = create_predict_input_fn(\n model_config=model_config, predict_input_config=eval_input_configs[0])\n\n export_to_tpu = hparams.get('export_to_tpu', False)\n tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',\n use_tpu, export_to_tpu)\n model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu)\n if use_tpu_estimator:\n estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=model_fn,\n train_batch_size=train_config.batch_size,\n # For each core, only batch size 1 is supported for eval.\n eval_batch_size=num_shards * 1 if use_tpu else 1,\n use_tpu=use_tpu,\n config=run_config,\n # TODO(lzc): Remove conditional after CMLE moves to TF 1.9\n params=params if params else {})\n else:\n estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)\n\n # Write the as-run pipeline config to disk.\n if run_config.is_chief and save_final_config:\n pipeline_config_final = create_pipeline_proto_from_configs(configs)\n config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)\n\n return dict(\n estimator=estimator,\n train_input_fn=train_input_fn,\n eval_input_fns=eval_input_fns,\n eval_input_names=eval_input_names,\n eval_on_train_input_fn=eval_on_train_input_fn,\n predict_input_fn=predict_input_fn,\n train_steps=train_steps)\n\n\ndef create_train_and_eval_specs(train_input_fn,\n eval_input_fns,\n eval_on_train_input_fn,\n predict_input_fn,\n train_steps,\n eval_on_train_data=False,\n final_exporter_name='Servo',\n eval_spec_names=None):\n \"\"\"Creates a `TrainSpec` and `EvalSpec`s.\n\n Args:\n train_input_fn: Function that produces features and labels on train data.\n eval_input_fns: A list of functions that produce features and labels on eval\n data.\n eval_on_train_input_fn: Function that produces features and labels for\n evaluation on train data.\n predict_input_fn: Function that produces features for inference.\n train_steps: Number of training steps.\n eval_on_train_data: Whether to evaluate model on training data. Default is\n False.\n final_exporter_name: String name given to `FinalExporter`.\n eval_spec_names: A list of string names for each `EvalSpec`.\n\n Returns:\n Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is\n True, the last `EvalSpec` in the list will correspond to training data. 
The\n rest EvalSpecs in the list are evaluation datas.\n \"\"\"\n train_spec = tf.estimator.TrainSpec(\n input_fn=train_input_fn, max_steps=train_steps)\n\n if eval_spec_names is None:\n eval_spec_names = [str(i) for i in range(len(eval_input_fns))]\n\n eval_specs = []\n for index, (eval_spec_name, eval_input_fn) in enumerate(\n zip(eval_spec_names, eval_input_fns)):\n # Uses final_exporter_name as exporter_name for the first eval spec for\n # backward compatibility.\n if index == 0:\n exporter_name = final_exporter_name\n else:\n exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name)\n exporter = tf.estimator.FinalExporter(\n name=exporter_name, serving_input_receiver_fn=predict_input_fn)\n eval_specs.append(\n tf.estimator.EvalSpec(\n name=eval_spec_name,\n input_fn=eval_input_fn,\n steps=None,\n exporters=exporter))\n\n if eval_on_train_data:\n eval_specs.append(\n tf.estimator.EvalSpec(\n name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))\n\n return train_spec, eval_specs\n\n\ndef continuous_eval(estimator, model_dir, input_fn, train_steps, name):\n \"\"\"Perform continuous evaluation on checkpoints written to a model directory.\n\n Args:\n estimator: Estimator object to use for evaluation.\n model_dir: Model directory to read checkpoints for continuous evaluation.\n input_fn: Input function to use for evaluation.\n train_steps: Number of training steps. This is used to infer the last\n checkpoint and stop evaluation loop.\n name: Namescope for eval summary.\n \"\"\"\n\n def terminate_eval():\n tf.logging.info('Terminating eval after 180 seconds of no checkpoints')\n return True\n\n for ckpt in tf.contrib.training.checkpoints_iterator(\n model_dir, min_interval_secs=180, timeout=None,\n timeout_fn=terminate_eval):\n\n tf.logging.info('Starting Evaluation.')\n try:\n eval_results = estimator.evaluate(\n input_fn=input_fn, steps=None, checkpoint_path=ckpt, name=name)\n tf.logging.info('Eval results: %s' % eval_results)\n\n # Terminate eval job when final checkpoint is reached\n current_step = int(os.path.basename(ckpt).split('-')[1])\n if current_step >= train_steps:\n tf.logging.info(\n 'Evaluation finished after training step %d' % current_step)\n break\n\n except tf.errors.NotFoundError:\n tf.logging.info(\n 'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)\n\n\ndef populate_experiment(run_config,\n hparams,\n pipeline_config_path,\n train_steps=None,\n eval_steps=None,\n model_fn_creator=create_model_fn,\n **kwargs):\n \"\"\"Populates an `Experiment` object.\n\n EXPERIMENT CLASS IS DEPRECATED. Please switch to\n tf.estimator.train_and_evaluate. As an example, see model_main.py.\n\n Args:\n run_config: A `RunConfig`.\n hparams: A `HParams`.\n pipeline_config_path: A path to a pipeline config file.\n train_steps: Number of training steps. If None, the number of training steps\n is set from the `TrainConfig` proto.\n eval_steps: Number of evaluation steps per evaluation cycle. 
If None, the\n number of evaluation steps is set from the `EvalConfig` proto.\n model_fn_creator: A function that creates a `model_fn` for `Estimator`.\n Follows the signature:\n\n * Args:\n * `detection_model_fn`: Function that returns `DetectionModel` instance.\n * `configs`: Dictionary of pipeline config objects.\n * `hparams`: `HParams` object.\n * Returns:\n `model_fn` for `Estimator`.\n\n **kwargs: Additional keyword arguments for configuration override.\n\n Returns:\n An `Experiment` that defines all aspects of training, evaluation, and\n export.\n \"\"\"\n tf.logging.warning('Experiment is being deprecated. Please use '\n 'tf.estimator.train_and_evaluate(). See model_main.py for '\n 'an example.')\n train_and_eval_dict = create_estimator_and_inputs(\n run_config,\n hparams,\n pipeline_config_path,\n train_steps=train_steps,\n eval_steps=eval_steps,\n model_fn_creator=model_fn_creator,\n save_final_config=True,\n **kwargs)\n estimator = train_and_eval_dict['estimator']\n train_input_fn = train_and_eval_dict['train_input_fn']\n eval_input_fns = train_and_eval_dict['eval_input_fns']\n predict_input_fn = train_and_eval_dict['predict_input_fn']\n train_steps = train_and_eval_dict['train_steps']\n\n export_strategies = [\n tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(\n serving_input_fn=predict_input_fn)\n ]\n\n return tf.contrib.learn.Experiment(\n estimator=estimator,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fns[0],\n train_steps=train_steps,\n eval_steps=None,\n export_strategies=export_strategies,\n eval_delay_secs=120,\n )\n"
] |
[
[
"tensorflow.logging.warning",
"tensorflow.contrib.tpu.bfloat16_scope",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.train.init_from_checkpoint",
"tensorflow.contrib.learn.Experiment",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.add_n",
"tensorflow.contrib.training.checkpoints_iterator",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.estimator.FinalExporter",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.summary.scalar",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.train.get_or_create_global_step",
"tensorflow.estimator.EvalSpec",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.argmax",
"tensorflow.tile",
"tensorflow.metrics.mean",
"tensorflow.estimator.Estimator",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.estimator.TrainSpec",
"tensorflow.logging.info",
"tensorflow.no_op",
"tensorflow.contrib.learn.utils.saved_model_export_utils.make_export_strategy",
"tensorflow.add_to_collection",
"tensorflow.train.Scaffold",
"tensorflow.slice",
"tensorflow.ones",
"tensorflow.estimator.EstimatorSpec"
]
] |
cryptexis/debias
|
[
"a9e0106dcb8668b95e4654ccb3e7373a70ea37a3"
] |
[
"models/adversarial_model.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nfrom utils.data import convert_categorical\nfrom models.base_model import BaseModel\n\n\nclass Discriminator:\n\n def __init__(self, discriminator_model, protected_variable):\n\n self.model = discriminator_model\n self.protected_variable = protected_variable\n\n\nclass FairClassifier(BaseModel):\n\n def __init__(self, predictor_model, discriminator_model: Discriminator, hyper_parameters=None):\n\n # assigning predictor and discriminator models\n self.predictor = predictor_model\n self.discriminator = discriminator_model\n\n # losses and optimizers\n self.loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n self.cosine_loss = tf.keras.losses.CosineSimilarity()\n self.predictor_optimizer = tf.keras.optimizers.Adam(1e-3)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(1e-3)\n\n self.metrics = [\n tf.keras.metrics.Mean(name='loss_mean'),\n tf.keras.metrics.TruePositives(name='tp'),\n tf.keras.metrics.FalsePositives(name='fp'),\n tf.keras.metrics.TrueNegatives(name='tn'),\n tf.keras.metrics.FalseNegatives(name='fn'),\n tf.keras.metrics.BinaryAccuracy(name='accuracy')\n ]\n\n self.hyper_parameters = hyper_parameters if hyper_parameters is not None else {}\n\n def __predictor_gradient(self, gradients_of_predictor_pred_loss, gradients_of_predictor_disc_loss):\n \"\"\"\n Calculate the final form of the gradient of the predictor network\n :param gradients_of_predictor_pred_loss: gradient of parameters based on the loss from predictor network\n :param gradients_of_predictor_disc_loss: gradient of parameters based on the loss from discriminator network\n :return:\n \"\"\"\n gradients_of_predictor = []\n num_gradients = len(gradients_of_predictor_disc_loss)\n for i in range(num_gradients):\n # weighted gradient coming from the discriminator\n alpha = self.hyper_parameters.get(\"alpha\", 1.0)\n disc_term = alpha*gradients_of_predictor_disc_loss[i]\n # projection of the gradient onto the discriminator gradient\n cosine_term = self.cosine_loss(gradients_of_predictor_pred_loss[i], gradients_of_predictor_disc_loss[i])\n proj_term = (cosine_term*tf.norm(gradients_of_predictor_pred_loss[i])*gradients_of_predictor_disc_loss[i])/\\\n tf.norm(gradients_of_predictor_disc_loss[i])\n\n # final form of the gradient\n gradients_of_predictor.append(gradients_of_predictor_pred_loss[i] - proj_term - disc_term)\n\n return gradients_of_predictor\n\n @tf.function\n def _train_step(self, input_features, labels):\n\n with tf.GradientTape() as predictor_tape, tf.GradientTape(persistent=True) as disc_tape:\n\n # predicting the label\n predictor_output = self.predictor(input_features, training=True)\n predictor_loss = self.loss(labels, predictor_output)\n\n # creating input for the discriminator\n labels = tf.cast(labels, dtype=tf.float32)\n # (\n s = (1.0 + np.abs(self.hyper_parameters.get('c', 1.0)))*predictor_output\n discriminator_input = tf.squeeze(tf.stack([s, s*labels, s*(1.0 - labels)], axis=1))\n\n # predicting the protected_variable\n discriminator_ouput = self.discriminator.model(discriminator_input, training=True)\n # converting protected variable into target column\n protected_feature = tf.keras.layers.DenseFeatures(convert_categorical(self.discriminator.protected_variable,\n self.hyper_parameters['category_maps']\n ))\n\n protected_output = tf.gather(protected_feature(input_features), 0, axis=1)\n # calculating the loss of the discriminator\n disc_loss = self.loss(protected_output, discriminator_ouput)\n\n # calculate and apply the gradient of 
parameters of the discriminator network\n gradients_of_discriminator = disc_tape.gradient(disc_loss,\n self.discriminator.model.trainable_variables)\n self.discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,\n self.discriminator.model.trainable_variables))\n\n # calculate gradients of parameters of predictor network based on\n # loss in the discriminator network\n gradients_of_predictor_disc_loss = disc_tape.gradient(disc_loss, self.predictor.trainable_variables)\n # loss in the predictor network\n gradients_of_predictor_pred_loss = predictor_tape.gradient(predictor_loss, self.predictor.trainable_variables)\n\n gradients_of_predictor = self.__predictor_gradient(gradients_of_predictor_pred_loss,\n gradients_of_predictor_disc_loss)\n\n # apply gradient updates\n self.predictor_optimizer.apply_gradients(zip(gradients_of_predictor, self.predictor.trainable_variables))\n\n return tf.cast(tf.greater(predictor_output, 0.0), dtype=tf.int32), predictor_loss\n"
] |
[
[
"tensorflow.keras.metrics.BinaryAccuracy",
"tensorflow.norm",
"tensorflow.keras.metrics.TruePositives",
"tensorflow.greater",
"tensorflow.keras.losses.CosineSimilarity",
"tensorflow.stack",
"tensorflow.keras.metrics.TrueNegatives",
"tensorflow.cast",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.GradientTape",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.FalsePositives",
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.metrics.FalseNegatives"
]
] |
neale/PySATDNN
|
[
"85ed9f416702dc9bcbaf3e0a2e93af2ad1fdeee9"
] |
[
"pysatdnn/models.py"
] |
[
"#####################################\n# models.py\n# contains the definitions for the target neural network architectures\n#\n####################################\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass OneLayerDNN(nn.Module):\n def __init__(self, d_in, d_out):\n super(OneLayerDNN, self).__init__()\n self.linear1 = nn.Linear(d_in, 2)\n\n def forward(self, x):\n x = self.linear1(x)\n return x\n\n\nclass TwoLayerDNN(nn.Module):\n def __init__(self, d_in, d_hidden):\n super(TwoLayerDNN, self).__init__()\n self.linear1 = nn.Linear(d_in, d_hidden)\n self.linear2 = nn.Linear(d_hidden, 2)\n\n def forward(self, x):\n x = self.linear1(x)\n x = self.linear2(x)\n return x\n\n\n"
] |
[
[
"torch.nn.Linear"
]
] |
jcydlxc/czsc
|
[
"9be21bec016b9bb1fe335142c746f8dc178de000"
] |
[
"czsc/utils/echarts_plot.py"
] |
[
"# coding: utf-8\n\"\"\"\n使用 pyecharts 定制绘图模块\n\n\"\"\"\n\nfrom pyecharts import options as opts\nfrom pyecharts.charts import HeatMap, Kline, Line, Bar, Scatter, Grid\nfrom pyecharts.commons.utils import JsCode\nfrom typing import List\nimport numpy as np\nfrom .ta import SMA, MACD\n\n\ndef heat_map(data: List[dict],\n x_label: List[str] = None,\n y_label: List[str] = None,\n title: str = \"热力图\",\n width: str = \"900px\",\n height: str = \"680px\") -> HeatMap:\n \"\"\"绘制热力图\n\n :param data: 用于绘制热力图的数据,示例如下\n [{'x': '0hour', 'y': '0day', 'heat': 11},\n {'x': '0hour', 'y': '1day', 'heat': 40},\n {'x': '0hour', 'y': '2day', 'heat': 38},\n {'x': '0hour', 'y': '3day', 'heat': 36},\n {'x': '0hour', 'y': '4day', 'heat': 11}]\n :param x_label: x轴标签\n :param y_label: y轴标签\n :param title: 图表标题\n :param width: 图表宽度\n :param height: 图表高度\n :return: 图表\n \"\"\"\n\n value = [[s['x'], s['y'], s['heat']] for s in data]\n heat = [s['heat'] for s in data]\n\n if not x_label:\n x_label = sorted(list(set([s['x'] for s in data])))\n\n if not y_label:\n y_label = sorted(list(set([s['y'] for s in data])))\n\n vis_map_opts = opts.VisualMapOpts(pos_left=\"90%\", pos_top=\"20%\", min_=min(heat), max_=max(heat))\n title_opts = opts.TitleOpts(title=title)\n init_opts = opts.InitOpts(page_title=title, width=width, height=height)\n dz_inside = opts.DataZoomOpts(False, \"inside\", xaxis_index=[0], range_start=80, range_end=100)\n dz_slider = opts.DataZoomOpts(True, \"slider\", xaxis_index=[0], pos_top=\"96%\", pos_bottom=\"0%\",\n range_start=80, range_end=100)\n legend_opts = opts.LegendOpts(is_show=False)\n\n hm = HeatMap(init_opts=init_opts)\n hm.add_xaxis(x_label)\n hm.add_yaxis(\"heat\", y_label, value, label_opts=opts.LabelOpts(is_show=True, position=\"inside\"))\n hm.set_global_opts(title_opts=title_opts, visualmap_opts=vis_map_opts, legend_opts=legend_opts,\n xaxis_opts=opts.AxisOpts(grid_index=0), datazoom_opts=[dz_inside, dz_slider])\n return hm\n\n\ndef kline_pro(kline: List[dict],\n fx: List[dict] = None,\n bi: List[dict] = None,\n xd: List[dict] = None,\n bs: List[dict] = None,\n title: str = \"缠中说禅K线分析\",\n width: str = \"1400px\",\n height: str = '580px') -> Grid:\n \"\"\"绘制缠中说禅K线分析结果\n\n :param kline: K线\n :param fx: 分型识别结果\n :param bi: 笔识别结果\n :param xd: 线段识别结果\n :param bs: 买卖点\n :param title: 图表标题\n :param width: 图表宽度\n :param height: 图表高度\n :return: 用Grid组合好的图表\n \"\"\"\n # 配置项设置\n # ------------------------------------------------------------------------------------------------------------------\n bg_color = \"#1f212d\" # 背景\n up_color = \"#F9293E\"\n down_color = \"#00aa3b\"\n\n init_opts = opts.InitOpts(bg_color=bg_color, width=width, height=height, animation_opts=opts.AnimationOpts(False))\n title_opts = opts.TitleOpts(title=title, pos_top=\"1%\",\n title_textstyle_opts=opts.TextStyleOpts(color=up_color, font_size=20),\n subtitle_textstyle_opts=opts.TextStyleOpts(color=down_color, font_size=12))\n\n label_not_show_opts = opts.LabelOpts(is_show=False)\n legend_not_show_opts = opts.LegendOpts(is_show=False)\n red_item_style = opts.ItemStyleOpts(color=up_color)\n green_item_style = opts.ItemStyleOpts(color=down_color)\n k_style_opts = opts.ItemStyleOpts(color=up_color, color0=down_color, border_color=up_color,\n border_color0=down_color, opacity=0.8)\n\n legend_opts = opts.LegendOpts(is_show=True, pos_top=\"1%\", pos_left=\"30%\", item_width=14, item_height=8,\n textstyle_opts=opts.TextStyleOpts(font_size=12, color=\"#0e99e2\"))\n brush_opts = opts.BrushOpts(tool_box=[\"rect\", \"polygon\", 
\"keep\", \"clear\"],\n x_axis_index=\"all\", brush_link=\"all\",\n out_of_brush={\"colorAlpha\": 0.1}, brush_type=\"lineX\")\n\n axis_pointer_opts = opts.AxisPointerOpts(is_show=True, link=[{\"xAxisIndex\": \"all\"}])\n\n dz_inside = opts.DataZoomOpts(False, \"inside\", xaxis_index=[0, 1, 2], range_start=80, range_end=100)\n dz_slider = opts.DataZoomOpts(True, \"slider\", xaxis_index=[0, 1, 2], pos_top=\"96%\",\n pos_bottom=\"0%\", range_start=80, range_end=100)\n\n yaxis_opts = opts.AxisOpts(is_scale=True, axislabel_opts=opts.LabelOpts(color=\"#c7c7c7\", font_size=8, position=\"inside\"))\n\n grid0_xaxis_opts = opts.AxisOpts(type_=\"category\", grid_index=0, axislabel_opts=label_not_show_opts,\n split_number=20, min_=\"dataMin\", max_=\"dataMax\",\n is_scale=True, boundary_gap=False, axisline_opts=opts.AxisLineOpts(is_on_zero=False))\n\n tool_tip_opts = opts.TooltipOpts(\n trigger=\"axis\",\n axis_pointer_type=\"cross\",\n background_color=\"rgba(245, 245, 245, 0.8)\",\n border_width=1,\n border_color=\"#ccc\",\n position=JsCode(\"\"\"\n function (pos, params, el, elRect, size) {\n \t\t\t\t\tvar obj = {top: 10};\n \t\t\t\t\tobj[['left', 'right'][+(pos[0] < size.viewSize[0] / 2)]] = 30;\n \t\t\t\t\treturn obj;\n \t\t\t\t}\n \"\"\"),\n textstyle_opts=opts.TextStyleOpts(color=\"#000\"),\n )\n\n # 数据预处理\n # ------------------------------------------------------------------------------------------------------------------\n dts = [x['dt'] for x in kline]\n # k_data = [[x['open'], x['close'], x['low'], x['high']] for x in kline]\n k_data = [opts.CandleStickItem(name=i, value=[x['open'], x['close'], x['low'], x['high']])\n for i, x in enumerate(kline)]\n\n vol = []\n for i, row in enumerate(kline):\n item_style = red_item_style if row['close'] > row['open'] else green_item_style\n bar = opts.BarItem(name=i, value=row['vol'], itemstyle_opts=item_style, label_opts=label_not_show_opts)\n vol.append(bar)\n\n close = np.array([x['close'] for x in kline], dtype=np.double)\n diff, dea, macd = MACD(close)\n\n ma5 = SMA(close, timeperiod=5)\n ma34 = SMA(close, timeperiod=34)\n ma233 = SMA(close, timeperiod=233)\n\n macd_bar = []\n for i, v in enumerate(macd.tolist()):\n item_style = red_item_style if v > 0 else green_item_style\n bar = opts.BarItem(name=i, value=round(v, 4), itemstyle_opts=item_style,\n label_opts=label_not_show_opts)\n macd_bar.append(bar)\n\n diff = diff.round(4)\n dea = dea.round(4)\n\n # K 线主图\n # ------------------------------------------------------------------------------------------------------------------\n chart_k = Kline()\n chart_k.add_xaxis(xaxis_data=dts)\n chart_k.add_yaxis(series_name=\"Kline\", y_axis=k_data, itemstyle_opts=k_style_opts)\n\n chart_k.set_global_opts(\n legend_opts=legend_opts,\n datazoom_opts=[dz_inside, dz_slider],\n yaxis_opts=yaxis_opts,\n tooltip_opts=tool_tip_opts,\n axispointer_opts=axis_pointer_opts,\n brush_opts=brush_opts,\n title_opts=title_opts,\n xaxis_opts=grid0_xaxis_opts\n )\n\n # 均线图\n # ------------------------------------------------------------------------------------------------------------------\n chart_ma = Line()\n chart_ma.add_xaxis(xaxis_data=dts)\n\n ma_keys = {\"MA5\": ma5, \"MA34\": ma34, \"MA233\": ma233}\n ma_colors = [\"#39afe6\", \"#da6ee8\", \"#00940b\"]\n for i, (name, ma) in enumerate(ma_keys.items()):\n chart_ma.add_yaxis(series_name=name, y_axis=ma, is_smooth=True,\n is_selected=False, symbol_size=0, label_opts=label_not_show_opts,\n linestyle_opts=opts.LineStyleOpts(opacity=0.8, width=1.0, color=ma_colors[i]))\n\n 
chart_ma.set_global_opts(xaxis_opts=grid0_xaxis_opts, legend_opts=legend_not_show_opts)\n chart_k = chart_k.overlap(chart_ma)\n\n # 缠论结果\n # ------------------------------------------------------------------------------------------------------------------\n if fx:\n fx_dts = [x['dt'] for x in fx]\n fx_val = [x['fx'] for x in fx]\n chart_fx = Scatter()\n chart_fx.add_xaxis(fx_dts)\n chart_fx.add_yaxis(series_name=\"FX\", y_axis=fx_val, is_selected=False,\n symbol=\"circle\", symbol_size=6, label_opts=label_not_show_opts,\n itemstyle_opts=opts.ItemStyleOpts(color=\"rgba(152, 147, 193, 1.0)\",))\n\n chart_fx.set_global_opts(xaxis_opts=grid0_xaxis_opts, legend_opts=legend_not_show_opts)\n chart_k = chart_k.overlap(chart_fx)\n\n if bi:\n bi_dts = [x['dt'] for x in bi]\n bi_val = [x['bi'] for x in bi]\n chart_bi = Scatter()\n chart_bi.add_xaxis(bi_dts)\n chart_bi.add_yaxis(series_name=\"BI\", y_axis=bi_val, is_selected=True,\n symbol=\"diamond\", symbol_size=10, label_opts=label_not_show_opts,\n itemstyle_opts=opts.ItemStyleOpts(color=\"rgba(184, 117, 225, 1.0)\",))\n\n chart_bi.set_global_opts(xaxis_opts=grid0_xaxis_opts, legend_opts=legend_not_show_opts)\n chart_k = chart_k.overlap(chart_bi)\n\n if xd:\n xd_dts = [x['dt'] for x in xd]\n xd_val = [x['xd'] for x in xd]\n chart_xd = Scatter()\n chart_xd.add_xaxis(xd_dts)\n chart_xd.add_yaxis(series_name=\"XD\", y_axis=xd_val, is_selected=True, symbol=\"triangle\", symbol_size=10,\n itemstyle_opts=opts.ItemStyleOpts(color=\"rgba(37, 141, 54, 1.0)\",))\n\n chart_xd.set_global_opts(xaxis_opts=grid0_xaxis_opts, legend_opts=legend_not_show_opts)\n chart_k = chart_k.overlap(chart_xd)\n\n if bs:\n b_dts = [x['dt'] for x in bs if x['mark'] == 'buy']\n if len(b_dts) > 0:\n b_val = [x['price'] for x in bs if x['mark'] == 'buy']\n chart_b = Scatter()\n chart_b.add_xaxis(b_dts)\n chart_b.add_yaxis(series_name=\"BUY\", y_axis=b_val, is_selected=False, symbol=\"arrow\", symbol_size=8,\n itemstyle_opts=opts.ItemStyleOpts(color=\"#f31e1e\",))\n\n chart_b.set_global_opts(xaxis_opts=grid0_xaxis_opts, legend_opts=legend_not_show_opts)\n chart_k = chart_k.overlap(chart_b)\n\n s_dts = [x['dt'] for x in bs if x['mark'] == 'sell']\n if len(s_dts) > 0:\n s_val = [x['price'] for x in bs if x['mark'] == 'sell']\n chart_s = Scatter()\n chart_s.add_xaxis(s_dts)\n chart_s.add_yaxis(series_name=\"SELL\", y_axis=s_val, is_selected=False, symbol=\"pin\", symbol_size=12,\n itemstyle_opts=opts.ItemStyleOpts(color=\"#45b97d\", ))\n\n chart_s.set_global_opts(xaxis_opts=grid0_xaxis_opts, legend_opts=legend_not_show_opts)\n chart_k = chart_k.overlap(chart_s)\n\n # 成交量图\n # ------------------------------------------------------------------------------------------------------------------\n chart_vol = Bar()\n chart_vol.add_xaxis(dts)\n chart_vol.add_yaxis(series_name=\"Volume\", y_axis=vol, bar_width='60%')\n chart_vol.set_global_opts(\n xaxis_opts=opts.AxisOpts(\n type_=\"category\",\n grid_index=1,\n axislabel_opts=opts.LabelOpts(is_show=True, font_size=8, color=\"#9b9da9\"),\n ),\n yaxis_opts=yaxis_opts, legend_opts=legend_not_show_opts,\n )\n\n # MACD图\n # ------------------------------------------------------------------------------------------------------------------\n chart_macd = Bar()\n chart_macd.add_xaxis(dts)\n chart_macd.add_yaxis(series_name=\"MACD\", y_axis=macd_bar, bar_width='60%')\n chart_macd.set_global_opts(\n xaxis_opts=opts.AxisOpts(\n type_=\"category\",\n grid_index=2,\n axislabel_opts=opts.LabelOpts(is_show=False),\n ),\n yaxis_opts=opts.AxisOpts(\n 
grid_index=2,\n split_number=4,\n axisline_opts=opts.AxisLineOpts(is_on_zero=False),\n axistick_opts=opts.AxisTickOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=False),\n axislabel_opts=opts.LabelOpts(is_show=True, color=\"#c7c7c7\"),\n ),\n legend_opts=opts.LegendOpts(is_show=False),\n )\n\n line = Line()\n line.add_xaxis(dts)\n line.add_yaxis(series_name=\"DIFF\", y_axis=diff, label_opts=label_not_show_opts, is_symbol_show=False,\n linestyle_opts=opts.LineStyleOpts(opacity=0.8, width=1.0, color=\"#da6ee8\"))\n line.add_yaxis(series_name=\"DEA\", y_axis=dea, label_opts=label_not_show_opts, is_symbol_show=False,\n linestyle_opts=opts.LineStyleOpts(opacity=0.8, width=1.0, color=\"#39afe6\"))\n\n chart_macd = chart_macd.overlap(line)\n\n grid0_opts = opts.GridOpts(pos_left=\"0%\", pos_right=\"1%\", pos_top=\"12%\", height=\"58%\")\n grid1_opts = opts.GridOpts(pos_left=\"0%\", pos_right=\"1%\", pos_top=\"74%\", height=\"8%\")\n grid2_opts = opts.GridOpts(pos_left=\"0%\", pos_right=\"1%\", pos_top=\"86%\", height=\"10%\")\n\n grid_chart = Grid(init_opts)\n grid_chart.add(chart_k, grid_opts=grid0_opts)\n grid_chart.add(chart_vol, grid_opts=grid1_opts)\n grid_chart.add(chart_macd, grid_opts=grid2_opts)\n return grid_chart\n\n\n"
] |
[
[
"numpy.array"
]
] |
savatia/social-stock
|
[
"67c5122f16610c07e798a92d379ad31bac111915"
] |
[
"backend/api/management/commands/fetch_sentiment.py"
] |
[
"from optparse import make_option\n\nfrom django.core.management.base import BaseCommand\nimport pandas as pd\nimport django\n\nfrom ...models import Sentiment, Company\n\n\nclass Command(BaseCommand):\n\thelp = \"Whatever you want to print here\"\n\n\tdef add_arguments(self, parser):\n\t\tparser.add_argument('--file')\n\t\tparser.add_argument('--security')\n\n\tdef get_sentiments(self, file_path, security):\n\t\tcompany = Company.objects.get(symbol=security)\n\t\tsentiments_df = pd.read_csv(file_path)\n\t\tfor index, item in sentiments_df.iterrows():\n\t\t\ttry:\n\t\t\t\tSentiment.objects.update_or_create(\n\t\t\t\t\tcompany=company,\n\t\t\t\t\tdate=item.Date,\n\t\t\t\t\tdefaults={\n\t\t\t\t\t\t\"tweet_sentiment\": item.TweetSentiment,\n\t\t\t\t\t\t\"tweet_volume\": item.TweetVolume,\n\t\t\t\t\t\t\"retweet_volume\": item.RTVolume,\n\t\t\t\t\t\t\"retweet_sentiment\": item.RTSentiment,\n\t\t\t\t\t\t\"favorite_sentiment\": item.LikeSentiment,\n\t\t\t\t\t\t\"favorite_volume\": item.LikeVolume\n\t\t\t\t\t}\n\n\t\t\t\t)\n\t\t\texcept django.db.utils.IntegrityError:\n\t\t\t\tpass\n\n\tdef handle(self, *args, **options):\n\t\tfile_path = options['file']\n\t\tsecurity = options['security']\n\t\tprint(file_path)\n\t\tprint(security)\n\t\tself.get_sentiments(file_path, security)\n"
] |
[
[
"pandas.read_csv"
]
] |
DucAnhPhi/NewsSearchEngine
|
[
"de274acc116d0a90b42c3279d4a4bd791867c67f"
] |
[
"test/test_feature_extraction.py"
] |
[
"import pytest\nimport json\nfrom ..embedding.model import EmbeddingModel\nfrom ..feature_extraction import FeatureExtraction\nimport numpy as np\n\nclass TestFeatureExtraction():\n @classmethod\n def setup_class(self):\n self.embedder_DE = EmbeddingModel(lang=\"de\")\n self.embedder_EN = EmbeddingModel(lang=\"en\")\n self.fe_DE = FeatureExtraction(self.embedder_DE, None)\n self.fe_EN = FeatureExtraction(self.embedder_EN, None)\n\n def test_mean_of_pairwise_cosine_distances(self):\n ems = np.array([\n [-1,1,1],\n [-11,3,9],\n [22,0,8]\n ], dtype=float)\n assert abs(0.9770 - FeatureExtraction.mean_of_pairwise_cosine_distances(ems)) < 1e-4\n\n def test_keywords_similarity_DE(self):\n keywords_sim = [\n \"Huhn\",\n \"Ei\",\n \"Vogel\",\n \"Geflügel\"\n ]\n keywords_diff = [\n \"Code\",\n \"Geflügel\",\n \"Siebträger\",\n \"Donald Trump\"\n ]\n ss_sim = self.fe_DE.get_keywords_similarity(keywords_sim)\n ss_diff = self.fe_DE.get_keywords_similarity(keywords_diff)\n assert ss_sim < ss_diff\n\n def test_keywords_similarity_empty_DE(self):\n empty = []\n ss = self.fe_DE.get_keywords_similarity(empty)\n assert ss == 0\n\n def test_keywords_similarity_one_DE(self):\n empty = [\"test\"]\n ss = self.fe_DE.get_keywords_similarity(empty)\n assert ss == 0\n\n def test_keywords_similarity_EN(self):\n keywords_sim = [\n \"Chicken\",\n \"Egg\",\n \"Bird\",\n \"Poultry\"\n ]\n keywords_diff = [\n \"Code\",\n \"Poultry\",\n \"Portafilter\",\n \"Donald Trump\"\n ]\n ss_sim = self.fe_EN.get_keywords_similarity(keywords_sim)\n ss_diff = self.fe_EN.get_keywords_similarity(keywords_diff)\n assert ss_sim < ss_diff\n\n def test_keywords_similarity_empty_EN(self):\n empty = []\n ss = self.fe_EN.get_keywords_similarity(empty)\n assert ss == 0\n\n def test_keywords_similarity_one_EN(self):\n empty = [\"test\"]\n ss = self.fe_EN.get_keywords_similarity(empty)\n assert ss == 0"
] |
[
[
"numpy.array"
]
] |
AurelianTactics/dm_env_rpc
|
[
"08f76b83af759d3f940207c53a1955a2af17198c"
] |
[
"dm_env_rpc/v1/tensor_utils_test.py"
] |
[
"# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for dm_env_rpc helper functions.\"\"\"\n\nimport struct\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport mock\nimport numpy as np\n\nfrom google.protobuf import any_pb2\nfrom google.protobuf import struct_pb2\nfrom dm_env_rpc.v1 import dm_env_rpc_pb2\nfrom dm_env_rpc.v1 import tensor_utils\n\n\nclass PackTensorTests(parameterized.TestCase):\n\n @parameterized.parameters(\n (np.float32(2.5), 'floats'),\n (2.5, 'doubles'),\n (np.int32(-25), 'int32s'),\n (np.int64(-25), 'int64s'),\n (np.frombuffer(b'\\xF0\\xF1\\xF2\\xF3', np.uint32)[0], 'uint32s'),\n (np.frombuffer(b'\\xF0\\xF1\\xF2\\xF3\\xF4\\xF5\\xF6\\xF7',\n np.uint64)[0], 'uint64s'),\n (True, 'bools'),\n (False, 'bools'),\n ('foo', 'strings'),\n )\n def test_pack_scalars(self, scalar, expected_payload):\n tensor = tensor_utils.pack_tensor(scalar)\n self.assertEqual([], tensor.shape)\n self.assertEqual([scalar], getattr(tensor, expected_payload).array)\n\n @parameterized.parameters(\n (np.int8(-25), 'b', 'int8s'),\n (np.uint8(250), 'B', 'uint8s'),\n )\n def test_pack_scalar_bytes(self, scalar, fmt, expected_payload):\n tensor = tensor_utils.pack_tensor(scalar)\n self.assertEqual([], tensor.shape)\n actual = struct.unpack(fmt, getattr(tensor, expected_payload).array)\n self.assertEqual(scalar, actual)\n\n def test_pack_scalar_protos(self):\n scalar = struct_pb2.Value(string_value='my message')\n tensor = tensor_utils.pack_tensor(scalar)\n self.assertEqual([], tensor.shape)\n self.assertLen(tensor.protos.array, 1)\n unpacked = struct_pb2.Value()\n self.assertTrue(tensor.protos.array[0].Unpack(unpacked))\n self.assertEqual(scalar, unpacked)\n\n def test_pack_scalar_any_proto(self):\n scalar = struct_pb2.Value(string_value='my message')\n scalar_any = any_pb2.Any()\n scalar_any.Pack(scalar)\n tensor = tensor_utils.pack_tensor(scalar_any)\n self.assertEqual([], tensor.shape)\n self.assertLen(tensor.protos.array, 1)\n unpacked = struct_pb2.Value()\n self.assertTrue(tensor.protos.array[0].Unpack(unpacked))\n self.assertEqual(scalar, unpacked)\n\n @parameterized.parameters(\n (25, np.float32, 'floats'),\n (25, np.float64, 'doubles'),\n (25, np.int32, 'int32s'),\n (25, np.int64, 'int64s'),\n (25, np.uint32, 'uint32s'),\n (25, np.uint64, 'uint64s'),\n (2**64-1, np.uint64, 'uint64s'),\n (True, np.bool, 'bools'),\n (False, np.bool, 'bools'),\n ('foo', np.str, 'strings'),\n )\n def test_pack_scalars_specific_dtype(self, scalar, dtype, expected_payload):\n tensor = tensor_utils.pack_tensor(scalar, dtype)\n self.assertEqual([], tensor.shape)\n self.assertEqual([scalar], getattr(tensor, expected_payload).array)\n\n def test_pack_with_dm_env_rpc_data_type(self):\n tensor = tensor_utils.pack_tensor([5], dm_env_rpc_pb2.DataType.FLOAT)\n self.assertEqual([5], tensor.floats.array)\n\n @parameterized.parameters(\n 
([np.int8(-25), np.int8(-23)], '2b', 'int8s'),\n ([np.uint8(249), np.uint8(250)], '2B', 'uint8s'),\n )\n def test_pack_bytes_array(self, scalar, fmt, expected_payload):\n tensor = tensor_utils.pack_tensor(scalar)\n self.assertEqual([2], tensor.shape)\n actual = struct.unpack(fmt, getattr(tensor, expected_payload).array)\n np.testing.assert_array_equal(scalar, actual)\n\n @parameterized.parameters(\n (np.array([1.0, 2.0], dtype=np.float32), 'floats'),\n (np.array([1.0, 2.0], dtype=np.float64), 'doubles'),\n ([1.0, 2.0], 'doubles'),\n (np.array([1, 2], dtype=np.int32), 'int32s'),\n (np.array([1, 2], dtype=np.int64), 'int64s'),\n (np.array([1, 2], dtype=np.uint32), 'uint32s'),\n (np.array([1, 2], dtype=np.uint64), 'uint64s'),\n ([True, False], 'bools'),\n (np.array([True, False]), 'bools'),\n (['foo', 'bar'], 'strings'),\n )\n def test_pack_arrays(self, array, expected_payload):\n tensor = tensor_utils.pack_tensor(array)\n self.assertEqual([2], tensor.shape)\n packed_array = getattr(tensor, expected_payload).array\n np.testing.assert_array_equal(array, packed_array)\n\n def test_pack_proto_arrays(self):\n array = np.array([\n struct_pb2.Value(string_value=message)\n for message in ['foo', 'bar']\n ])\n tensor = tensor_utils.pack_tensor(array)\n self.assertEqual([2], tensor.shape)\n unpacked = struct_pb2.Value()\n tensor.protos.array[0].Unpack(unpacked)\n self.assertEqual(array[0], unpacked)\n tensor.protos.array[1].Unpack(unpacked)\n self.assertEqual(array[1], unpacked)\n\n def test_pack_mixed_proto_array_fails(self):\n with self.assertRaisesRegex(ValueError, 'not recognized'):\n tensor_utils.pack_tensor(np.array([struct_pb2.Value(), 1, 2, 3]))\n\n def test_packed_rowmajor(self):\n array2d = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32)\n tensor = tensor_utils.pack_tensor(array2d)\n self.assertEqual([3, 2], tensor.shape)\n np.testing.assert_array_equal([1, 2, 3, 4, 5, 6], tensor.int32s.array)\n\n def test_mixed_scalar_types_raises_exception(self):\n with self.assertRaises(TypeError):\n tensor_utils.pack_tensor(['hello!', 75], dtype=np.float32)\n\n def test_jagged_arrays_throw_exceptions(self):\n with self.assertRaises(ValueError):\n tensor_utils.pack_tensor([[1, 2], [3, 4, 5]])\n\n @parameterized.parameters(\n (['foo', 'bar'], np.str_),\n ('baz', dm_env_rpc_pb2.DataType.STRING),\n (['foobar'], np.array(['foobar']).dtype),\n )\n def test_np_object_strings(self, value, dtype):\n object_array = np.array(value, dtype=np.object)\n tensor = tensor_utils.pack_tensor(object_array, dtype=dtype)\n self.assertEqual(list(object_array.shape), tensor.shape)\n self.assertTrue(tensor.HasField('strings'))\n\n def test_np_object_strings_no_dtype_raises_exception(self):\n with self.assertRaises(ValueError):\n tensor_utils.pack_tensor(np.array(['foo'], dtype=np.object))\n\n @parameterized.parameters(\n (['foo', 42, 'bar'],),\n ([1, 2, 3],),\n )\n def test_np_object_to_strings_fail(self, bad_element):\n with self.assertRaisesRegex(TypeError,\n 'not all elements are Python string types'):\n tensor_utils.pack_tensor(\n np.array(bad_element, dtype=np.object), dtype=np.str_)\n\n def test_class_instance_throw_exception(self):\n\n class Foo(object):\n pass\n\n with self.assertRaises(ValueError):\n tensor_utils.pack_tensor(Foo())\n\n def test_compress_integers_to_1_element_when_all_same(self):\n array = np.array([1, 1, 1, 1, 1, 1], dtype=np.uint32)\n packed = tensor_utils.pack_tensor(array, try_compress=True)\n self.assertEqual([6], packed.shape)\n self.assertEqual([1], packed.uint32s.array)\n\n def 
test_compress_floats_to_1_element_when_all_same(self):\n array = np.array([1.5, 1.5, 1.5, 1.5, 1.5, 1.5], dtype=np.float32)\n packed = tensor_utils.pack_tensor(array, try_compress=True)\n self.assertEqual([6], packed.shape)\n self.assertEqual([1.5], packed.floats.array)\n\n def test_compress_strings_to_1_element_when_all_same(self):\n array = np.array(['foo', 'foo', 'foo', 'foo'], dtype=np.str_)\n packed = tensor_utils.pack_tensor(array, try_compress=True)\n self.assertEqual([4], packed.shape)\n self.assertEqual(['foo'], packed.strings.array)\n\n def test_compress_multidimensional_arrays_to_1_element_when_all_same(self):\n array = np.array([[4, 4], [4, 4]], dtype=np.int32)\n packed = tensor_utils.pack_tensor(array, try_compress=True)\n self.assertEqual([2, 2], packed.shape)\n self.assertEqual([4], packed.int32s.array)\n\n def test_doesnt_compress_if_not_asked_to(self):\n array = np.array([1, 1, 1, 1, 1, 1], dtype=np.uint32)\n packed = tensor_utils.pack_tensor(array)\n self.assertEqual([6], packed.shape)\n self.assertEqual([1, 1, 1, 1, 1, 1], packed.uint32s.array)\n\n def test_ask_to_compress_but_cant(self):\n array = np.array([1, 1, 2, 1, 1, 1], dtype=np.uint32)\n packed = tensor_utils.pack_tensor(array, try_compress=True)\n self.assertEqual([6], packed.shape)\n self.assertEqual([1, 1, 2, 1, 1, 1], packed.uint32s.array)\n\n\nclass UnpackTensorTests(parameterized.TestCase):\n\n @parameterized.parameters(\n np.float32(2.5),\n np.float64(2.5),\n np.int8(-25),\n np.int32(-25),\n np.int64(-25),\n np.uint8(250),\n np.frombuffer(b'\\xF0\\xF1\\xF2\\xF3', np.uint32)[0],\n np.frombuffer(b'\\xF0\\xF1\\xF2\\xF3\\xF4\\xF5\\xF6\\xF7', np.uint64)[0],\n True,\n False,\n 'foo',\n )\n def test_unpack_scalars(self, scalar):\n tensor = tensor_utils.pack_tensor(scalar)\n round_trip = tensor_utils.unpack_tensor(tensor)\n self.assertEqual(scalar, round_trip)\n\n def test_unpack_scalar_proto(self):\n scalar = struct_pb2.Value(string_value='my message')\n tensor = tensor_utils.pack_tensor(scalar)\n\n unpacked = struct_pb2.Value()\n tensor_utils.unpack_tensor(tensor).Unpack(unpacked)\n self.assertEqual(scalar, unpacked)\n\n @parameterized.parameters(\n ([np.float32(2.5), np.float32(3.5)],),\n ([np.float64(2.5), np.float64(3.5)],),\n ([np.int8(-25), np.int8(-23)],),\n ([np.int32(-25), np.int32(-23)],),\n ([np.int64(-25), np.int64(-23)],),\n ([np.uint8(250), np.uint8(249)],),\n ([np.uint32(1), np.uint32(2)],),\n ([np.uint64(1), np.uint64(2)],),\n ([True, False],),\n (['foo', 'bar'],),\n )\n def test_unpack_arrays(self, array):\n tensor = tensor_utils.pack_tensor(array)\n round_trip = tensor_utils.unpack_tensor(tensor)\n np.testing.assert_array_equal(array, round_trip)\n\n def test_unpack_proto_arrays(self):\n array = np.array([\n struct_pb2.Value(string_value=message)\n for message in ['foo', 'bar']\n ])\n tensor = tensor_utils.pack_tensor(array)\n round_trip = tensor_utils.unpack_tensor(tensor)\n\n unpacked = struct_pb2.Value()\n round_trip[0].Unpack(unpacked)\n self.assertEqual(array[0], unpacked)\n round_trip[1].Unpack(unpacked)\n self.assertEqual(array[1], unpacked)\n\n def test_unpack_multidimensional_arrays(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.floats.array[:] = [1, 2, 3, 4, 5, 6, 7, 8]\n tensor.shape[:] = [2, 4]\n round_trip = tensor_utils.unpack_tensor(tensor)\n expected = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])\n np.testing.assert_array_equal(expected, round_trip)\n\n def test_too_few_elements(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.floats.array[:] = [1, 2, 3, 4]\n tensor.shape[:] = 
[2, 4]\n with self.assertRaisesRegex(ValueError, 'cannot reshape array'):\n tensor_utils.unpack_tensor(tensor)\n\n def test_too_many_elements(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.floats.array[:] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n tensor.shape[:] = [2, 4]\n with self.assertRaisesRegex(ValueError, 'cannot reshape array'):\n tensor_utils.unpack_tensor(tensor)\n\n def test_float_broadcasts_1_element_to_all_elements(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.floats.array[:] = [1]\n tensor.shape[:] = [4]\n unpacked = tensor_utils.unpack_tensor(tensor)\n expected = np.array([1, 1, 1, 1], dtype=np.float32)\n np.testing.assert_array_equal(expected, unpacked)\n\n def test_integer_broadcasts_1_element_to_all_elements(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.int32s.array[:] = [1]\n tensor.shape[:] = [4]\n unpacked = tensor_utils.unpack_tensor(tensor)\n expected = np.array([1, 1, 1, 1], dtype=np.int32)\n np.testing.assert_array_equal(expected, unpacked)\n\n def test_unsigned_integer_broadcasts_1_element_to_all_elements(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.uint8s.array = b'\\x01'\n tensor.shape[:] = [4]\n unpacked = tensor_utils.unpack_tensor(tensor)\n expected = np.array([1, 1, 1, 1], dtype=np.uint8)\n np.testing.assert_array_equal(expected, unpacked)\n\n def test_string_broadcasts_1_element_to_all_elements(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.strings.array[:] = ['foo']\n tensor.shape[:] = [4]\n unpacked = tensor_utils.unpack_tensor(tensor)\n expected = np.array(['foo', 'foo', 'foo', 'foo'], dtype=np.str_)\n np.testing.assert_array_equal(expected, unpacked)\n\n def test_broadcasts_to_multidimensional_arrays(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.int32s.array[:] = [4]\n tensor.shape[:] = [2, 2]\n unpacked = tensor_utils.unpack_tensor(tensor)\n expected = np.array([[4, 4], [4, 4]], dtype=np.int32)\n np.testing.assert_array_equal(expected, unpacked)\n\n def test_negative_dimension(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.int32s.array[:] = [1, 2, 3, 4]\n tensor.shape[:] = [-1]\n unpacked = tensor_utils.unpack_tensor(tensor)\n expected = np.array([1, 2, 3, 4], dtype=np.int32)\n np.testing.assert_array_equal(expected, unpacked)\n\n def test_negative_dimension_in_matrix(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.int32s.array[:] = [1, 2, 3, 4, 5, 6]\n tensor.shape[:] = [2, -1]\n unpacked = tensor_utils.unpack_tensor(tensor)\n expected = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)\n np.testing.assert_array_equal(expected, unpacked)\n\n def test_two_negative_dimensions_in_matrix(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.int32s.array[:] = [1, 2, 3, 4, 5, 6]\n tensor.shape[:] = [-1, -2]\n with self.assertRaisesRegex(ValueError, 'one unknown dimension'):\n tensor_utils.unpack_tensor(tensor)\n\n def test_negative_dimension_single_element(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.int32s.array[:] = [1]\n tensor.shape[:] = [-1]\n unpacked = tensor_utils.unpack_tensor(tensor)\n expected = np.array([1], dtype=np.int32)\n np.testing.assert_array_equal(expected, unpacked)\n\n def test_unknown_type_raises_error(self):\n tensor = mock.MagicMock()\n tensor.WhichOneof.return_value = 'foo'\n with self.assertRaisesRegex(TypeError, 'type foo'):\n tensor_utils.unpack_tensor(tensor)\n\n def test_scalar_with_too_many_elements_raises_error(self):\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.int32s.array[:] = [1, 2, 3]\n with self.assertRaisesRegex(ValueError, '3 element'):\n tensor_utils.unpack_tensor(tensor)\n\n\nclass 
GetTensorTypeTests(absltest.TestCase):\n\n def test_float(self):\n tensor = tensor_utils.pack_tensor(1.25)\n self.assertEqual(np.float64, tensor_utils.get_tensor_type(tensor))\n\n def test_unknown_tensor_type(self):\n mock_tensor = mock.MagicMock()\n mock_tensor.WhichOneof.return_value = 'foo'\n with self.assertRaisesRegex(TypeError, 'foo'):\n tensor_utils.get_tensor_type(mock_tensor)\n\n\nclass DataTypeToNpTypeTests(absltest.TestCase):\n\n def test_float(self):\n self.assertEqual(\n np.float32,\n tensor_utils.data_type_to_np_type(dm_env_rpc_pb2.DataType.FLOAT))\n\n def test_empty_object_list(self):\n tensor = tensor_utils.pack_tensor(np.array([], dtype=np.object))\n self.assertEqual([0], tensor.shape)\n\n def test_unknown_type(self):\n with self.assertRaises(TypeError):\n tensor_utils.data_type_to_np_type(30) # pytype: disable=wrong-arg-types\n\n\nclass NpTypeToDataTypeTests(absltest.TestCase):\n\n def test_float32(self):\n self.assertEqual(\n dm_env_rpc_pb2.DataType.FLOAT,\n tensor_utils.np_type_to_data_type(np.float32))\n\n def test_int32(self):\n self.assertEqual(\n dm_env_rpc_pb2.DataType.INT32,\n tensor_utils.np_type_to_data_type(np.int32))\n\n def test_dtype(self):\n self.assertEqual(\n dm_env_rpc_pb2.DataType.INT32,\n tensor_utils.np_type_to_data_type(np.dtype(np.int32)))\n\n def test_unknown_type(self):\n with self.assertRaisesRegex(TypeError, 'dm_env_rpc DataType.*complex64'):\n tensor_utils.np_type_to_data_type(np.complex64)\n\n\nclass GetPackerTests(absltest.TestCase):\n\n def test_cannot_get_packer_for_invalid_type(self):\n with self.assertRaisesRegex(TypeError, 'complex64'):\n tensor_utils.get_packer(np.complex64)\n\n def test_can_pack(self):\n packer = tensor_utils.get_packer(np.int32)\n tensor = dm_env_rpc_pb2.Tensor()\n packer.pack(tensor, np.asarray([1, 2, 3], dtype=np.int32))\n self.assertEqual([1, 2, 3], tensor.int32s.array)\n\n def test_can_unpack(self):\n packer = tensor_utils.get_packer(np.int32)\n tensor = dm_env_rpc_pb2.Tensor()\n tensor.int32s.array[:] = [1, 2, 3]\n np.testing.assert_array_equal([1, 2, 3], packer.unpack(tensor))\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] |
[
[
"numpy.uint32",
"numpy.asarray",
"numpy.uint8",
"numpy.int32",
"numpy.int8",
"numpy.dtype",
"numpy.testing.assert_array_equal",
"numpy.int64",
"numpy.frombuffer",
"numpy.uint64",
"numpy.float64",
"numpy.float32",
"numpy.array"
]
] |
ShiNik/DeepLearning_Tensorflow
|
[
"a7faf8decc72b6c7b6e3ca08226ca33f792a82ab"
] |
[
"dogs_vs_cats/src/data_prepration.py"
] |
[
"from util import get_path\nfrom util import get_index_by_image_name\n\nimport random\nimport numpy as np\nfrom numpy import save\nimport os\nfrom os import listdir\nimport cv2\nimport keras\nfrom sklearn.model_selection import train_test_split\n\n\n\ndef split_and_save(config, data,labels):\n print(\"Pre-processing data!\")\n (x_train, x_test, y_train, y_test) = train_test_split(data, labels,\n test_size=0.25,\n stratify=labels,\n random_state=42,\n shuffle=True)\n\n #todo:remove it from here, add it to when you do training since we might need it for embbeding\n y_train = keras.utils.to_categorical(y_train, config.num_classes)\n y_test = keras.utils.to_categorical(y_test, config.num_classes)\n\n # save the reshaped photos\n x_train_path = get_path(config.data_path_root, config.x_train_path)\n save(x_train_path, x_train)\n\n y_train_path = get_path(config.data_path_root, config.y_train_path)\n save(y_train_path, y_train)\n\n x_test_path = get_path(config.data_path_root, config.x_test_path)\n save(x_test_path, x_test)\n\n y_test_path = get_path(config.data_path_root, config.y_test_path)\n save(y_test_path, y_test)\n\ndef prepare_data(data_dir, config):\n print(\"Start preparing data!\")\n image_paths = []\n\n # enumerate files in the directory\n for file in listdir(data_dir):\n image_path = get_path(data_dir, file) # create path to dogs and cats\n image_paths.append(image_path)\n\n random.seed(42)\n random.shuffle(image_paths)\n data, labels = list(), list()\n\n for image_path in image_paths:\n image = cv2.imread(image_path)\n image = cv2.resize(image, (config.image_size, config.image_size))\n data.append(image)\n\n # determine class\n image_name = os.path.basename(image_path)\n index = get_index_by_image_name(config, image_name)\n if index == -1:\n print(\"unrecognizede label for image:\" + image_path)\n exit(-1)\n\n output = float(index)\n labels.append(output)\n\n if config.small_data_set_size > 0 and (len(labels) == config.small_data_set_size):\n break\n\n # scale the raw pixel intensities to the range [0, 1]\n data = np.array(data, dtype=\"float\") / 255.0\n labels = np.array(labels)\n\n split_and_save(config, data, labels)\n\n print(\"Data preparation completed!\")\n return data, labels\n\n"
] |
[
[
"numpy.array",
"sklearn.model_selection.train_test_split",
"numpy.save"
]
] |
sjmielke/addons
|
[
"f126f9183e9dd62e78ac1cea3d4213626c1ea4d1"
] |
[
"tensorflow_addons/image/distance_transform.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Distance transform ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_addons.utils.resource_loader import get_path_to_datafile\n\n_image_ops_so = tf.load_op_library(\n get_path_to_datafile(\"custom_ops/image/_image_ops.so\"))\n\ntf.no_gradient(\"EuclideanDistanceTransform\")\n\n\[email protected]\ndef euclidean_dist_transform(images, dtype=tf.float32, name=None):\n \"\"\"Applies euclidean distance transform(s) to the image(s).\n\n Args:\n images: A tensor of shape (num_images, num_rows, num_columns, 1) (NHWC),\n or (num_rows, num_columns, 1) (HWC). The rank must be statically known\n (the shape is not `TensorShape(None)`.\n dtype: DType of the output tensor.\n name: The name of the op.\n\n Returns:\n Image(s) with the type `dtype` and same shape as `images`, with the\n transform applied. If a tensor of all ones is given as input, the\n output tensor will be filled with the max value of the `dtype`.\n\n Raises:\n TypeError: If `image` is not tf.uint8, or `dtype` is not floating point.\n ValueError: If `image` more than one channel, or `image` is not of\n rank 3 or 4.\n \"\"\"\n\n with tf.name_scope(name or \"euclidean_distance_transform\"):\n image_or_images = tf.convert_to_tensor(images, name=\"images\")\n\n if image_or_images.dtype.base_dtype != tf.uint8:\n raise TypeError(\n \"Invalid dtype %s. Expected uint8.\" % image_or_images.dtype)\n if image_or_images.get_shape().ndims is None:\n raise ValueError(\"`images` rank must be statically known\")\n elif len(image_or_images.get_shape()) == 3:\n images = image_or_images[None, :, :, :]\n elif len(image_or_images.get_shape()) == 4:\n images = image_or_images\n else:\n raise ValueError(\"`images` should have rank between 3 and 4\")\n\n if images.get_shape()[3] != 1 and images.get_shape()[3] is not None:\n raise ValueError(\"`images` must have only one channel\")\n\n if dtype not in [tf.float16, tf.float32, tf.float64]:\n raise TypeError(\"`dtype` must be float16, float32 or float64\")\n\n images = tf.cast(images, dtype)\n output = _image_ops_so.euclidean_distance_transform(images)\n\n if len(image_or_images.get_shape()) == 3:\n return output[0, :, :, :]\n return output\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.no_gradient",
"tensorflow.cast",
"tensorflow.name_scope"
]
] |