Columns:
  repo_name: string (lengths 6 to 130)
  hexsha: list
  file_path: list
  code: list
  apis: list
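Each record below pairs a repository snapshot with the files sampled from it: repo_name is a single string, while hexsha, file_path, code, and apis are parallel lists with one entry per sampled file (apis holds the fully qualified library calls detected in that file's source). A minimal sketch of iterating records with this column layout follows; it assumes the rows are stored as JSON Lines, and the file name records.jsonl plus the dict-style field access are illustrative assumptions, not documented tooling for this corpus.

import json

def iter_api_usage(path="records.jsonl"):
    # Yield (repo_name, file_path, apis) triples from a JSONL dump of the columns above.
    # Assumption: each line is one record with the five fields listed in the header.
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            row = json.loads(line)
            # file_path and apis are parallel lists, one entry per file in the record
            for file_path, apis in zip(row["file_path"], row["apis"]):
                yield row["repo_name"], file_path, apis

if __name__ == "__main__":
    for repo, path, apis in iter_api_usage():
        print("{}:{} -> {}".format(repo, path, ", ".join(apis)))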
abcnishant007/sklearn-evaluation
[ "77ff2da43097b0451d8cf6f95c534409f612bf6a" ]
[ "tests/test_metrics.py" ]
[ "from unittest import TestCase\nfrom sklearn_evaluation.metrics import (precision_at, labels_at,\n tp_at, fp_at)\n\nimport numpy as np\nfrom numpy import nan\n\n\nclass Test_precision_at(TestCase):\n\n def test_perfect_precision(self):\n labels = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n scores = np.array([100, 90, 80, 70, 60, 50, 40, 30, 20, 10])\n prec, cutoff = precision_at(labels, scores, top_proportion=0.10)\n self.assertEqual(prec, 1.0)\n self.assertEqual(cutoff, 100)\n\n def test_perfect_precision_with_nas(self):\n labels = np.array([1, nan, 1, 1, 1, nan, 0, 0, 0, 0])\n scores = np.array([100, 90, 80, 70, 60, 50, 40, 30, 20, 10])\n prec, cutoff = precision_at(\n labels, scores, top_proportion=0.10, ignore_nas=True)\n self.assertEqual(prec, 1.0)\n self.assertEqual(cutoff, 100)\n\n def test_baseline_precision(self):\n labels = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n scores = np.array([100, 90, 80, 70, 60, 50, 40, 30, 20, 10])\n prec, cutoff = precision_at(labels, scores, top_proportion=1.0)\n self.assertEqual(prec, 0.5)\n self.assertEqual(cutoff, 10)\n\n def test_baseline_precision_with_nas(self):\n labels = np.array([nan, 1, nan, 1, 1, nan, nan, 0, 0, 0])\n scores = np.array([100, 90, 80, 70, 60, 50, 40, 30, 20, 10])\n prec, cutoff = precision_at(\n labels, scores, top_proportion=1.0, ignore_nas=True)\n self.assertEqual(prec, 0.5)\n self.assertEqual(cutoff, 10)\n\n def test_proportion_less_than_zero(self):\n self.assertRaises(ValueError, precision_at, [1], [0], -0.1)\n\n def test_proportion_more_than_one(self):\n self.assertRaises(ValueError, precision_at, [1], [0], top_proportion=1.1)\n\n\nclass Test_labels_at(TestCase):\n\n def test_no_labels_at_1(self):\n y_true = np.array([nan, nan, nan, nan, nan, nan, nan, nan, nan, nan])\n y_score = np.random.rand(1, 10)\n labels = labels_at(y_true, y_score, top_proportion=0.01, normalize=False)\n self.assertEqual(labels, 0)\n\n def test_no_labels_at_50(self):\n y_true = np.array([nan, nan, nan, nan, nan, nan, nan, nan, nan, nan])\n y_score = np.random.rand(1, 10)\n labels = labels_at(y_true, y_score, top_proportion=0.5, normalize=False)\n self.assertEqual(labels, 0)\n\n def test_no_labels_at_100(self):\n y_true = np.array([nan, nan, nan, nan, nan, nan, nan, nan, nan, nan])\n y_score = np.random.rand(1, 10)\n labels = labels_at(y_true, y_score, top_proportion=1.0, normalize=False)\n self.assertEqual(labels, 0)\n\n def test_one_label_at_10(self):\n y_true = np.array([1, nan, nan, nan, nan, nan, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.1, normalize=False)\n self.assertEqual(labels, 1)\n\n def test_one_label_at_10_norm(self):\n y_true = np.array([1, nan, nan, nan, nan, nan, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.1, normalize=True)\n self.assertEqual(labels, 1.0)\n\n def test_one_label_at_50(self):\n y_true = np.array([1, nan, nan, nan, nan, nan, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.5, normalize=False)\n self.assertEqual(labels, 1)\n\n def test_one_label_at_100(self):\n y_true = np.array([1, nan, nan, nan, nan, nan, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=1.0, normalize=False)\n self.assertEqual(labels, 1)\n\n def 
test_60_labels_at_60(self):\n y_true = np.array([1, 1, 1, 1, 1, 1, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.6, normalize=False)\n self.assertEqual(labels, 6)\n\n def test_60_labels_at_60_norm(self):\n y_true = np.array([1, 1, 1, 1, 1, 1, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.6, normalize=True)\n self.assertEqual(labels, 1.0)\n\n def test_60_labels_at_60_mixed_values(self):\n y_true = np.array([1, 0, 0, 1, 0, 1, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.6, normalize=False)\n self.assertEqual(labels, 6)\n\n def test_60_labels_at_60_norm_mixed_values(self):\n y_true = np.array([0, 0, 0, 1, 0, 1, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.6, normalize=True)\n self.assertEqual(labels, 1.0)\n\n def test_60_labels_at_30(self):\n y_true = np.array([1, 1, 1, 1, 1, 1, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.3, normalize=False)\n self.assertEqual(labels, 3)\n\n def test_60_labels_at_30_norm(self):\n y_true = np.array([1, 1, 1, 1, 1, 1, nan, nan, nan, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n labels = labels_at(y_true, y_score, top_proportion=0.3, normalize=True)\n self.assertEqual(labels, 0.5)\n\n def test_proportion_less_than_zero(self):\n self.assertRaises(ValueError, labels_at, [1], [0], -0.1)\n\n def test_proportion_more_than_one(self):\n self.assertRaises(ValueError, labels_at, [1], [0], top_proportion=1.1)\n\n\nclass Test_tp_at(TestCase):\n\n def test_with_nas(self):\n y_true = np.array([1, nan, 1, 1, 1, 1, 1, 1, 1, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, top_proportion=0.1)\n self.assertEqual(tps, 1)\n\n def test_all_tp_at_10(self):\n y_true = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, top_proportion=0.1)\n self.assertEqual(tps, 1)\n\n def test_all_tp_at_50(self):\n y_true = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, top_proportion=0.5)\n self.assertEqual(tps, 5)\n\n def test_all_tp_at_100(self):\n y_true = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, top_proportion=1.0)\n self.assertEqual(tps, 10)\n\n def test_no_tp_at_50(self):\n y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, top_proportion=0.5)\n self.assertEqual(tps, 0)\n\n def test_no_tp_at_100(self):\n y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, top_proportion=1.0)\n self.assertEqual(tps, 0)\n\n def test_some_tp_at_10(self):\n y_true = np.array([1, 0, 0, 0, 0, 0, 0, 1, 1, 1])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, 
top_proportion=0.1)\n self.assertEqual(tps, 1)\n\n def test_some_tp_at_50(self):\n y_true = np.array([1, 1, 0, 0, 1, 0, 0, 1, 1, 0])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, top_proportion=0.5)\n self.assertEqual(tps, 3)\n\n def test_some_tp_at_100(self):\n y_true = np.array([0, 0, 0, 0, 1, 0, 0, 1, 1, 1])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n tps = tp_at(y_true, y_score, top_proportion=1.0)\n self.assertEqual(tps, 4)\n\n def test_proportion_less_than_zero(self):\n self.assertRaises(ValueError, tp_at, [1], [0], -0.1)\n\n def test_proportion_more_than_one(self):\n self.assertRaises(ValueError, tp_at, [1], [0], top_proportion=1.1)\n\n\nclass Test_fp_at(TestCase):\n\n def test_with_nas(self):\n y_true = np.array([0, nan, 1, 1, 1, 1, 1, 1, 1, nan])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=0.1)\n self.assertEqual(fps, 1)\n\n def test_all_fp_at_10(self):\n y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=0.1)\n self.assertEqual(fps, 1)\n\n def test_all_fp_at_50(self):\n y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=0.5)\n self.assertEqual(fps, 5)\n\n def test_all_fp_at_100(self):\n y_true = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=1.0)\n self.assertEqual(fps, 10)\n\n def test_no_fp_at_50(self):\n y_true = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=0.5)\n self.assertEqual(fps, 0)\n\n def test_no_fp_at_100(self):\n y_true = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=1.0)\n self.assertEqual(fps, 0)\n\n def test_some_fp_at_10(self):\n y_true = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=0.1)\n self.assertEqual(fps, 1)\n\n def test_some_fp_at_50(self):\n y_true = np.array([1, 1, 0, 0, 1, 0, 0, 1, 1, 0])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=0.5)\n self.assertEqual(fps, 2)\n\n def test_some_fp_at_100(self):\n y_true = np.array([0, 0, 0, 0, 1, 0, 0, 1, 1, 1])\n y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])\n fps = fp_at(y_true, y_score, top_proportion=1.0)\n self.assertEqual(fps, 6)\n\n def test_proportion_less_than_zero(self):\n self.assertRaises(ValueError, fp_at, [1], [0], -0.1)\n\n def test_proportion_more_than_one(self):\n self.assertRaises(ValueError, fp_at, [1], [0], top_proportion=1.1)\n" ]
[ [ "numpy.array", "numpy.random.rand" ] ]
ronaldseoh/GRACE
[ "ab32a79677ed6dd5dfcfb14aaa4d6422ff48675c" ]
[ "ate_run.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport logging\nimport os\nimport random\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom file_utils import PYTORCH_PRETRAINED_BERT_CACHE\nfrom ate_modeling import BertForSequenceLabeling\nfrom optimization import BertAdam\nfrom tokenization import BertTokenizer\nfrom seqeval.metrics import f1_score, precision_score, recall_score, accuracy_score\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm\n\nfrom ate_features import ATEProcessor, convert_examples_to_features, get_labels\nfrom utils import get_logger, get_aspect_chunks\n\ndef warmup_linear(x, warmup=0.002):\n if x < warmup:\n return x / warmup\n return max(0, (1.0 - x) / (1.0 - warmup))\n\ndef parse_input_parameter():\n global logger\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\", default=None, type=str, required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased.\")\n parser.add_argument(\"--init_model\", default=None, type=str, required=False, help=\"Initial model.\")\n parser.add_argument(\"--task_name\", default=\"ate\", type=str, required=False, help=\"The name of the task to train.\")\n parser.add_argument(\"--data_name\", default=\"\", type=str, required=False, help=\"The name of the task to train.\")\n parser.add_argument(\"--train_file\", default=None, type=str, required=False)\n parser.add_argument(\"--valid_file\", default=None, type=str, required=False)\n parser.add_argument(\"--test_file\", default=None, type=str, required=False)\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\", default=128, type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\nSequences longer than this will be truncated, and sequences shorter \\nthan this will be padded.\")\n parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true', help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_test\", action='store_true', help=\"Whether to run eval on the TEST set.\")\n parser.add_argument(\"--do_lower_case\", action='store_true', help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\", default=32, type=int, help=\"Total batch size for training.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--eval_batch_size\", default=128, type=int, help=\"Total batch size for eval.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\", default=3, type=int, help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\", default=0.1, type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.\")\n parser.add_argument('--num_thread_reader', type=int, default=0, help='')\n parser.add_argument(\"--no_cuda\", action='store_true', help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed', type=int, default=42, help=\"random seed for initialization\")\n parser.add_argument('--fp16', action='store_true', help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale', type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\\n0 (default value): dynamic loss scaling.\\nPositive power of 2: static loss scaling value.\\n\")\n parser.add_argument(\"--verbose_logging\", default=False, action='store_true',\n help=\"If true, all of the warnings related to data processing will be printed. 
A number of warnings are expected for a normal CoQA evaluation.\")\n parser.add_argument('--server_ip', type=str, default='', help=\"Can be used for distant debugging.\");\n parser.add_argument('--server_port', type=str, default='', help=\"Can be used for distant debugging.\")\n\n parser.add_argument(\"--use_ghl\", action='store_true', help=\"Whether use weighted cross entropy to decoder.\")\n parser.add_argument(\"--use_vat\", action='store_true', help=\"Whether use vat to encoder.\")\n\n args = parser.parse_args()\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n if not args.do_train and not args.do_test:\n raise ValueError(\"At least one of `do_train` or `do_test` must be True.\")\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir, exist_ok=True)\n\n logger = get_logger(os.path.join(args.output_dir, \"log.txt\"))\n\n logger.info(\"Effective parameters:\")\n for key in sorted(args.__dict__):\n logger.info(\" {}: {}\".format(key, args.__dict__[key]))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n task_config = {\n \"use_ghl\": args.use_ghl,\n \"use_vat\": args.use_vat,\n }\n\n return args, task_config\n\ndef init_device(args):\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n return device, n_gpu\n\ndef init_model(args, num_labels, task_config, device, n_gpu):\n\n if args.init_model:\n model_state_dict = torch.load(args.init_model, map_location='cpu')\n if \"model_state_dict\" in model_state_dict:\n model_state_dict = model_state_dict['model_state_dict']\n else:\n model_state_dict = None\n\n # Prepare model\n cache_dir = args.cache_dir if args.cache_dir else \\\n os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))\n model = BertForSequenceLabeling.from_pretrained(args.bert_model, cache_dir=cache_dir, state_dict=model_state_dict,\n num_labels=num_labels, task_config=task_config)\n if args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n return model\n\ndef prep_optimizer(args, model, num_train_optimization_steps):\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not 
any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n return optimizer\n\ndef dataloader_train(args, tokenizer, file_path):\n dataset = ATEProcessor(file_path=file_path, set_type=\"train\")\n logger.info(\"Loaded train file: {}\".format(file_path))\n labels = get_labels(dataset.label_list)\n\n features = convert_examples_to_features(dataset.examples, labels,\n args.max_seq_length, tokenizer,\n verbose_logging=args.verbose_logging)\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)\n\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=args.num_thread_reader)\n\n return dataloader, train_data, labels\n\ndef dataloader_val(args, tokenizer, file_path, labels, set_type=\"val\"):\n\n dataset = ATEProcessor(file_path=file_path, set_type=set_type)\n logger.info(\"Loaded eval file: {}\".format(file_path))\n\n eval_features = convert_examples_to_features(dataset.examples, labels,\n args.max_seq_length, tokenizer,\n verbose_logging=args.verbose_logging)\n\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n # Run prediction for full data\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n return eval_dataloader, eval_data\n\ndef train_epoch(epoch, args, model, train_dataloader, device, n_gpu, tokenizer, optimizer, global_step, num_train_optimization_steps):\n global logger\n torch.cuda.empty_cache()\n model.train()\n log_step = 100\n start_time = time.time()\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n\n weight_gradient = None # Init in model: [bin_num]\n weight_gradient_labels = None # Init in model:\n for step, batch in enumerate(train_dataloader):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, label_ids = batch\n loss, 
acc_sum, weight_gradient, weight_gradient_labels = model(input_ids, segment_ids, input_mask, label_ids, weight_gradient, weight_gradient_labels)\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n tr_loss += float(loss.item())\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n # modify learning rate with special warm up BERT uses\n # if args.fp16 is False, BertAdam is used that handles this automatically\n lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_optimization_steps, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n if global_step % log_step == 0:\n logger.info(\"Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f\", epoch + 1,\n args.num_train_epochs, step + 1,\n len(train_dataloader), \"-\".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]),\n float(loss.item()), (time.time() - start_time) / (log_step * args.gradient_accumulation_steps))\n start_time = time.time()\n\n tr_loss = tr_loss / len(train_dataloader)\n return tr_loss, global_step\n\ndef cal_f1(y_true, y_pred):\n correct_pred, total_ground, total_pred = 0., 0., 0.\n for ground_seq, pred_seq in zip(y_true, y_pred):\n lab_chunks = get_aspect_chunks(ground_seq, default=\"O\")\n lab_pred_chunks = get_aspect_chunks(pred_seq, default=\"O\")\n lab_chunks = set(lab_chunks)\n lab_pred_chunks = set(lab_pred_chunks)\n\n correct_pred += len(lab_chunks & lab_pred_chunks)\n total_pred += len(lab_pred_chunks)\n total_ground += len(lab_chunks)\n\n p = correct_pred / total_pred if total_pred > 0 else 0.\n r = correct_pred / total_ground if total_ground > 0 else 0.\n f1 = 2 * p * r / (p + r) if p > 0 and r > 0 else 0.\n return p,r,f1\n\ndef eval_epoch(model, eval_dataloader, label_list, device):\n model.eval()\n\n y_true = []\n y_pred = []\n label_map = {i: label for i, label in enumerate(label_list)}\n for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n\n with torch.no_grad():\n logits = model(input_ids, segment_ids, input_mask)\n logits = torch.argmax(F.log_softmax(logits, dim=2), dim=2)\n logits = logits.detach().cpu().numpy()\n\n label_ids = label_ids.to('cpu').numpy()\n for i, lab_ids in enumerate(label_ids):\n temp_1 = []\n temp_2 = []\n for j, l in enumerate(lab_ids):\n if l != -1:\n temp_1.append(label_map[label_ids[i][j]])\n temp_2.append(label_map[logits[i][j]])\n\n y_true.append(temp_1)\n y_pred.append(temp_2)\n\n p, r, f1 = cal_f1(y_true, y_pred)\n logger.info(\"p:{:.4f}\\tr:{:.4f}\\tf1:{:.4f}\".format(p, r, f1))\n\n return p, r, f1\n\ndef predict(epoch, args, test_dataloader, model, label_list, tokenizer, device):\n model.eval()\n\n y_true = []\n y_pred = []\n label_map = {i: label for i, label in enumerate(label_list)}\n for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader, desc=\"Test\", ncols=100, ascii=True):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n\n with torch.no_grad():\n logits = model(input_ids, 
segment_ids, input_mask)\n logits = torch.argmax(F.log_softmax(logits, dim=2), dim=2)\n logits = logits.detach().cpu().numpy()\n\n label_ids = label_ids.to('cpu').numpy()\n for i, lab_ids in enumerate(label_ids):\n temp_1 = []\n temp_2 = []\n for j, l in enumerate(lab_ids):\n if l != -1:\n temp_1.append(label_map[label_ids[i][j]])\n temp_2.append(label_map[logits[i][j]])\n\n y_true.append(temp_1)\n y_pred.append(temp_2)\n\n p, r, f1 = cal_f1(y_true, y_pred)\n logger.info(\"p:{:.4f}\\tr:{:.4f}\\tf1:{:.4f}\".format(p, r, f1))\n\ndef save_model(epoch, args, model):\n # Only save the model it-self\n model_to_save = model.module if hasattr(model, 'module') else model\n output_model_file = os.path.join(\n args.output_dir, \"pytorch_model.bin.{}\".format(epoch))\n torch.save(model_to_save.state_dict(), output_model_file)\n logger.info(\"Model saved to %s\", output_model_file)\n return output_model_file\n\ndef load_model(epoch, args, num_labels, task_config, device):\n model_file = os.path.join(\n args.output_dir,\n \"pytorch_model.bin.{}\".format(epoch))\n if os.path.exists(model_file):\n model_state_dict = torch.load(model_file, map_location='cpu')\n logger.info(\"Model loaded from %s\", model_file)\n model = BertForSequenceLabeling.from_pretrained(args.bert_model,\n cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),\n state_dict=model_state_dict, num_labels=num_labels, task_config=task_config)\n model.to(device)\n else:\n model = None\n return model\n\nDATALOADER_DICT = {}\n\nDATALOADER_DICT[\"ate\"] = {\"train\":dataloader_train, \"eval\":dataloader_val}\n\nDATASET_DICT = {}\n\nDATASET_DICT[\"lap\"] = {\n \"train_file\": \"laptops_2014_train.txt\",\n \"valid_file\": \"laptops_2014_trial.txt\",\n \"test_file\": \"laptops_2014_test.gold.txt\"}\n\nDATASET_DICT[\"res\"] = {\n \"train_file\": \"restaurants_union_train.txt\",\n \"valid_file\": \"restaurants_union_trial.txt\",\n \"test_file\":\"restaurants_union_test.gold.txt\"}\n\nfor i in [\"2014\", \"2015\", \"2016\"]:\n DATASET_DICT[\"res{}\".format(i)] = {\n \"train_file\": \"restaurants_{}_train.txt\".format(i),\n \"valid_file\": \"restaurants_{}_trial.txt\".format(i),\n \"test_file\": \"restaurants_{}_test.gold.txt\".format(i)}\n\nfor i in range(10):\n DATASET_DICT[\"twt{}\".format(i+1)] = {\n \"train_file\":\"twitter_{}_train.txt\".format(i+1),\n \"valid_file\":\"twitter_{}_test.gold.txt\".format(i+1),\n \"test_file\":\"twitter_{}_test.gold.txt\".format(i+1)}\n\n# Joint SemEval 2014 domain == SemEval 2014 Laptop + Restaurants\nDATASET_DICT[\"joint_semeval2014\"] = {\n \"train_file\": \"joint_2014_train.txt\",\n \"test_file\": [\"laptops_2014_test.gold.txt\", \"restaurants_2014_test.gold.txt\"],\n \"valid_file\": [\"laptops_2014_trial.txt\", \"restaurants_2014_trial.txt\"]}\n\ndef main():\n global logger\n args, task_config = parse_input_parameter()\n\n random.seed(args.seed)\n os.environ['PYTHONHASHSEED'] = str(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed) # if you are using multi-GPU.\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n device, n_gpu = init_device(args)\n\n data_name = args.data_name.lower()\n\n if data_name in DATASET_DICT:\n args.train_file = DATASET_DICT[data_name][\"train_file\"]\n\n if args.do_eval:\n args.valid_file = DATASET_DICT[data_name][\"valid_file\"]\n \n if args.do_test:\n args.test_file = DATASET_DICT[data_name][\"test_file\"]\n else:\n assert 
args.train_file is not None\n \n if args.do_eval:\n assert args.valid_file is not None\n \n if args.do_test:\n assert args.test_file is not None\n\n task_name = args.task_name.lower()\n\n if task_name not in DATALOADER_DICT:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n if n_gpu > 1 and (args.use_ghl):\n logger.warning(\"Multi-GPU make the results not reproduce.\")\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n\n # Generate label list from training dataset\n file_path = os.path.join(args.data_dir, args.train_file)\n train_dataloader, train_examples, label_list = DATALOADER_DICT[task_name][\"train\"](args, tokenizer, file_path)\n logging.info(\"Labels are = %s:\", \"[\"+\", \".join(label_list)+\"]\")\n num_labels = len(label_list)\n\n # Initialize the model\n model = init_model(args, num_labels, task_config, device, n_gpu)\n\n # Generate test dataset\n logger.info(\"***** Running test *****\")\n\n if data_name.startswith('joint'):\n # May need to perform testing on more than one domain\n test_dataloaders_list = []\n test_examples_list = []\n\n for td, test_file_name in enumerate(args.test_file):\n file_path = os.path.join(args.data_dir, test_file_name)\n test_dataloader, test_examples = DATALOADER_DICT[task_name][\"eval\"](\n args, tokenizer, file_path, labels=label_list, set_type=\"test\")\n\n test_dataloaders_list.append(test_dataloader)\n test_examples_list.append(test_examples)\n \n logger.info(\" Domain %d\", td)\n logger.info(\" Num examples = %d\", len(test_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n else:\n file_path = os.path.join(args.data_dir, args.test_file)\n test_dataloader, test_examples = DATALOADER_DICT[task_name][\"eval\"](\n args, tokenizer, file_path, labels=label_list, set_type=\"test\")\n\n logger.info(\" Num examples = %d\", len(test_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n if args.do_train:\n num_train_optimization_steps = (int(len(\n train_dataloader) + args.gradient_accumulation_steps - 1) / args.gradient_accumulation_steps) * args.num_train_epochs\n if args.local_rank != -1:\n num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()\n\n optimizer = prep_optimizer(args, model, num_train_optimization_steps)\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_optimization_steps)\n\n if args.do_eval:\n logger.info(\"***** Running evaluation *****\")\n if data_name.startswith('joint'):\n # May need to perform evaluation on more than one domain\n eval_dataloaders_list = []\n eval_examples_list = []\n\n for ed, eval_file_name in enumerate(args.valid_file):\n file_path = os.path.join(args.data_dir, eval_file_name)\n eval_dataloader, eval_examples = DATALOADER_DICT[task_name][\"eval\"](\n args, tokenizer, file_path, labels=label_list, set_type=\"val\")\n \n eval_dataloaders_list.append(eval_dataloader)\n eval_examples_list.append(eval_examples)\n\n logger.info(\" Domain %d\", ed)\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n else:\n file_path = os.path.join(args.data_dir, args.valid_file)\n eval_dataloader, eval_examples = DATALOADER_DICT[task_name][\"eval\"](\n args, tokenizer, file_path, labels=label_list, set_type=\"val\")\n\n logger.info(\" Num examples = %d\", 
len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n global_step = 0\n\n for epoch in range(args.num_train_epochs):\n tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, tokenizer,\n optimizer, global_step, num_train_optimization_steps)\n logger.info(\"Epoch %d/%s Finished, Train Loss: %f\", epoch + 1, args.num_train_epochs, tr_loss)\n save_model(epoch, args, model)\n \n if args.do_eval:\n if data_name.startswith('joint'):\n for ed, eval_dataloader in enumerate(eval_dataloaders_list):\n logger.info(\" Domain %d\", ed)\n eval_epoch(model, eval_dataloader, label_list, device)\n else:\n eval_epoch(model, eval_dataloader, label_list, device)\n\n if args.do_test:\n logger.info(\"***Results on test***\")\n if data_name.startswith('joint'):\n for td, test_dataloader in enumerate(test_dataloaders_list):\n logger.info(\" Domain %d\", td)\n eval_epoch(model, test_dataloader, label_list, device)\n else:\n eval_epoch(model, test_dataloader, label_list, device)\n elif args.do_test:\n if args.init_model:\n if data_name.startswith('joint'):\n for td, test_dataloader in enumerate(test_dataloaders_list):\n logger.info(\" Domain %d\", td)\n eval_epoch(model, test_dataloader, label_list, device)\n else:\n eval_epoch(model, test_dataloader, label_list, device)\n else:\n for epoch in range(args.num_train_epochs):\n # Load a trained model that you have fine-tuned\n model = load_model(epoch, args, num_labels, task_config, device)\n if not model:\n break\n \n if data_name.startswith('joint'):\n for td, test_dataloader in enumerate(test_dataloaders_list):\n logger.info(\" Domain %d\", td)\n eval_epoch(model, test_dataloader, label_list, device)\n else:\n eval_epoch(model, test_dataloader, label_list, device)\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"Keyboard break~\")\n" ]
[ [ "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.utils.data.TensorDataset", "torch.tensor", "torch.cuda.empty_cache", "torch.cuda.device_count", "torch.distributed.get_world_size", "numpy.random.seed", "torch.cuda.manual_seed", "torch.cuda.set_device", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.nn.functional.log_softmax", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ] ]
mori97/VaDE
[ "b3c3bdcdd758415d61ffe9f21733d32b89304643" ]
[ "main.py" ]
[ "import argparse\n\nimport matplotlib.pyplot as plt\nfrom munkres import Munkres\nfrom sklearn.manifold import TSNE\nimport torch\nimport torch.utils.data\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import datasets, transforms\n\nfrom vade import VaDE, lossfun\n\nN_CLASSES = 10\nPLOT_NUM_PER_CLASS = 128\n\n\ndef train(model, data_loader, optimizer, device, epoch, writer):\n model.train()\n\n total_loss = 0\n for x, _ in data_loader:\n x = x.to(device).view(-1, 784)\n recon_x, mu, logvar = model(x)\n loss = lossfun(model, x, recon_x, mu, logvar)\n total_loss += loss.item()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n writer.add_scalar('Loss/train', total_loss / len(data_loader), epoch)\n\n\ndef test(model, data_loader, device, epoch, writer, plot_points):\n model.eval()\n\n gain = torch.zeros((N_CLASSES, N_CLASSES), dtype=torch.int, device=device)\n with torch.no_grad():\n for xs, ts in data_loader:\n xs, ts = xs.to(device).view(-1, 784), ts.to(device)\n ys = model.classify(xs)\n for t, y in zip(ts, ys):\n gain[t, y] += 1\n cost = (torch.max(gain) - gain).cpu().numpy()\n assign = Munkres().compute(cost)\n acc = torch.sum(gain[tuple(zip(*assign))]).float() / torch.sum(gain)\n\n # Plot latent space\n xs, ts = plot_points[0].to(device), plot_points[1].numpy()\n zs = model.encode(xs)[0].cpu().numpy()\n tsne = TSNE(n_components=2, init='pca')\n zs_tsne = tsne.fit_transform(zs)\n\n fig, ax = plt.subplots()\n cmap = plt.get_cmap(\"tab10\")\n for t in range(10):\n points = zs_tsne[ts == t]\n ax.scatter(points[:, 0], points[:, 1], color=cmap(t), label=str(t))\n ax.legend()\n\n writer.add_scalar('Acc/test', acc.item(), epoch)\n writer.add_figure('LatentSpace', fig, epoch)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Train VaDE with MNIST dataset',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--epochs', '-e',\n help='Number of epochs.',\n type=int, default=100)\n parser.add_argument('--gpu', '-g',\n help='GPU id. 
(Negative number indicates CPU)',\n type=int, default=-1)\n parser.add_argument('--learning-rate', '-l',\n help='Learning Rate.',\n type=float, default=0.002)\n parser.add_argument('--batch-size', '-b',\n help='Batch size.',\n type=int, default=100)\n parser.add_argument('--pretrain', '-p',\n help='Load parameters from pretrained model.',\n type=str, default=None)\n args = parser.parse_args()\n\n if_use_cuda = torch.cuda.is_available() and args.gpu >= 0\n device = torch.device('cuda:{}'.format(args.gpu) if if_use_cuda else 'cpu')\n\n dataset = datasets.MNIST('./data', train=True, download=True,\n transform=transforms.ToTensor())\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=if_use_cuda)\n\n # For plotting\n plot_points = {}\n for t in range(10):\n points = torch.cat([data for data, label in dataset if label == t])\n points = points.view(-1, 784)[:PLOT_NUM_PER_CLASS].to(device)\n plot_points[t] = points\n xs = []\n ts = []\n for t, x in plot_points.items():\n xs.append(x)\n t = torch.full((x.size(0),), t, dtype=torch.long)\n ts.append(t)\n plot_points = (torch.cat(xs, dim=0), torch.cat(ts, dim=0))\n\n model = VaDE(N_CLASSES, 784, 10)\n if args.pretrain:\n model.load_state_dict(torch.load(args.pretrain))\n model = model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\n # LR decreases every 10 epochs with a decay rate of 0.9\n lr_scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=10, gamma=0.9)\n\n # TensorBoard\n writer = SummaryWriter()\n\n for epoch in range(1, args.epochs + 1):\n train(model, data_loader, optimizer, device, epoch, writer)\n test(model, data_loader, device, epoch, writer, plot_points)\n lr_scheduler.step()\n\n writer.close()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.max", "torch.zeros", "torch.cat", "torch.load", "torch.utils.data.DataLoader", "matplotlib.pyplot.subplots", "matplotlib.pyplot.get_cmap", "torch.sum", "sklearn.manifold.TSNE", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.optim.lr_scheduler.StepLR" ] ]
GrayKS3248/RL_Agents
[ "dc84321b18131b811f39bc5491f4714e3554d610" ]
[ "Pendulum_Test_Env.py" ]
[ "import random\nimport numpy as np\nimport scipy.integrate\n\n\nclass Pendulum():\n def __init__(self, n_theta=31, n_thetadot=31, n_tau=31):\n # Parameters that describe the physical system\n self.params = {\n 'm': 1.0, # mass\n 'g': 9.8, # acceleration of gravity\n 'l': 1.0, # length\n 'b': 0.1, # coefficient of viscous friction\n }\n\n # Maximum absolute angular velocity\n self.max_thetadot = 15.0\n\n # Maximum absolute angle to be considered \"upright\"\n self.max_theta_for_upright = 0.1 * np.pi\n\n # Maximum absolute angular velocity from which to sample initial condition\n self.max_thetadot_for_init = 5.0\n\n # Maximum absolute torque\n self.max_tau = 5.0\n\n # Time step\n self.dt = 0.1\n\n # Number of grid points in each dimension (should be odd, so that there\n # is always one grid point at \"0\")\n self.n_theta = n_theta\n self.n_thetadot = n_thetadot\n self.n_tau = n_tau\n\n # Number of finite states and actions after discretization\n self.num_states = self.n_theta * self.n_thetadot\n self.num_actions = self.n_tau\n\n # Time horizon\n self.max_num_steps = 100\n\n # Reset to initial conditions\n self.reset()\n\n def _x_to_s(self, x):\n # Get theta - wrapping to [-pi, pi) - and thetadot\n theta = ((x[0] + np.pi) % (2 * np.pi)) - np.pi\n thetadot = x[1]\n # Convert to i, j coordinates\n i = (self.n_theta * (theta + np.pi)) // (2 * np.pi)\n j = (self.n_thetadot * (thetadot + self.max_thetadot)) // (2 * self.max_thetadot)\n # Clamp i, j coordinates\n i = max(0, min(self.n_theta - 1, i))\n j = max(0, min(self.n_thetadot - 1, j))\n # Convert to state\n return int(i * self.n_thetadot + j)\n\n def _a_to_u(self, a):\n return -self.max_tau + ((2 * self.max_tau * a) / (self.n_tau - 1))\n\n def _dxdt(self, x, u):\n theta_ddot = (u - self.params['b'] * x[1] + self.params['m'] * self.params['g'] * self.params['l'] * np.sin(x[0])) / (self.params['m'] * self.params['l']**2)\n return np.array([x[1], theta_ddot])\n\n def step(self, a):\n # Verify action is in range\n if not (a in range(self.num_actions)):\n raise ValueError(f'invalid action {a}')\n\n # Convert a to u\n u = self._a_to_u(a)\n\n # Solve ODEs to find new x\n sol = scipy.integrate.solve_ivp(fun=lambda t, x: self._dxdt(x, u), t_span=[0, self.dt], y0=self.x, t_eval=[self.dt])\n self.x = sol.y[:, 0]\n\n # Convert x to s\n self.s = self._x_to_s(self.x)\n\n # Get theta - wrapping to [-pi, pi) - and thetadot\n theta = ((self.x[0] + np.pi) % (2 * np.pi)) - np.pi\n thetadot = self.x[1]\n\n # Compute reward\n if abs(thetadot) > self.max_thetadot:\n # If constraints are violated, then return large negative reward\n r = -100\n elif abs(theta) < self.max_theta_for_upright:\n # If pendulum is upright, then return small positive reward\n r = 1\n else:\n # Otherwise, return zero reward\n r = 0\n\n # Increment number of steps and check for end of episode\n self.num_steps += 1\n self.t = self.num_steps * self.dt\n done = (self.num_steps >= self.max_num_steps)\n\n return (self.s, r, done)\n\n def reset(self):\n # Sample theta and thetadot\n self.x = np.random.uniform([-np.pi, -self.max_thetadot_for_init], [np.pi, self.max_thetadot_for_init])\n\n # Convert to finite state\n self.s = self._x_to_s(self.x)\n\n # Reset current time (expressed as number of simulation steps taken so far) to zero\n self.num_steps = 0\n self.t = self.num_steps * self.dt\n\n return self.s\n\n def render(self):\n # FIXME (we will happily accept a PR to create a graphic visualization of the pendulum)\n pass\n" ]
[ [ "numpy.random.uniform", "numpy.array", "numpy.sin" ] ]
QuantumLiu/OpenIVA
[ "4c97a28f999965bab1f4e9e9fd0289df96669526", "4c97a28f999965bab1f4e9e9fd0289df96669526" ]
[ "openiva/commons/facial/database.py", "openiva/commons/generators.py" ]
[ "import os\n\nimport json\n\nimport numpy as np\n\nimport h5py\n\nclass FacialDB(object):\n '''\n 人脸信息数据库\n Facial info database\n 存储人脸对应的姓名、性别、特征向量等,可执行查询方法\n '''\n _DB_DICT={}\n def __init__(self,path_json=None,db_dict=None) -> None:\n if not path_json is None:\n self.load_json(path_json)\n if not db_dict is None:\n self.update(db_dict)\n \n\n def query_N2N(self,features_tocheck,known_features=None,threshold=0.6):\n features_tocheck=np.ascontiguousarray(features_tocheck)\n known_features=(self.known_mean_features if known_features is None else known_features)\n\n dists_N=np.dot(features_tocheck,known_features)\n\n dists_max=dists_N.max(axis=-1)\n inds=dists_N.argmax(axis=-1)\n knowns=dists_max>threshold\n\n return inds,knowns,dists_max\n \n def query(self,feature_tocheck,known_features=None,threshold=0.6):\n inds,knowns,dists_max=self.query_N2N([feature_tocheck],known_features,threshold)\n return inds[0],knowns[0],dists_max[0]\n\n\n @ property\n def db_dict(self):\n return self._DB_DICT.copy()\n \n @property\n def known_mean_features(self):\n list_features=[d[\"feature_vector\"] for d in self._DB_DICT.values()]\n return np.ascontiguousarray(list_features).T\n\n @property\n def index2id(self):\n return {ind:id_person for ind,id_person in enumerate(self._DB_DICT.keys())}\n\n @property\n def id2name(self):\n return {k:v[\"name\"] for k,v in self._DB_DICT.items()}\n \n @property\n def ind2name(self):\n return {ind:v[\"name\"] for ind,v in enumerate(self._DB_DICT.values())}\n\n @property\n def all_names(self):\n return [v[\"name\"] for v in self._DB_DICT.values()]\n\n @property\n def nb_people(self):\n return len(self._DB_DICT.keys())\n\n def append(self,id,info_dict):\n self._DB_DICT.update({id:info_dict})\n\n def update(self,db_dict):\n self._DB_DICT.update(db_dict)\n\n def load_json(self,path_json):\n with open(path_json,\"r\") as fp:\n db_dict=json.load(fp)\n self.update(db_dict)\n\n def save_to_json(self,path_json):\n with open(path_json,\"w\") as fp:\n json.dump(self.db_dict)\n", "import os\nimport traceback\n\nfrom tqdm import tqdm\n\nimport cv2\nimport numpy as np\n\nfrom .videocoding import decode_video_batch_local\n\nfrom .io import imread\n\ndef read_images_local(pathes_imgs:list,batch_size=8,shuffle=False):\n q_dict_out={'flag_start':True,'flag_end':False,'real_len':0}\n\n samples_indecies=np.asarray(range(len(pathes_imgs)))\n if shuffle:\n np.random.shuffle(samples_indecies)\n \n last_i=samples_indecies[-1]\n \n pathes_imgs={i:j for i,j in enumerate(pathes_imgs)}\n\n batch_images=[]\n batch_indecies=[]\n batch_pathes=[]\n\n nb_samples=0\n nb_batches=0\n\n empty_frame=np.ones((100,100,3),dtype=np.uint8)\n\n for i_sample in tqdm(samples_indecies):\n nb_samples+=1\n\n path_img=pathes_imgs[i_sample]\n try:\n img=imread(path_img)\n except FileNotFoundError:\n traceback.print_exc()\n continue\n \n batch_images.append(img)\n batch_indecies.append(i_sample)\n batch_pathes.append(path_img)\n q_dict_out['real_len']+=1\n\n if len(batch_images)==batch_size:\n #print('putting')\n\n q_dict_out['flag_end']=(i_sample==last_i)\n q_dict_out['batch_src_size']=[img.shape[:2][::-1] for img in batch_images]\n q_dict_out['batch_images']=batch_images\n q_dict_out['batch_indecies']=batch_indecies\n q_dict_out['batch_pathes']=batch_pathes\n \n nb_batches+=1\n\n # q_compute.put(q_dict_out)\n yield q_dict_out\n\n #Flush\n\n batch_images=[]\n batch_indecies=[]\n batch_pathes=[]\n q_dict_out={'flag_start':False,'flag_end':False,'real_len':0}\n\n q_dict_out['flag_end']=True\n\n #Padding last batch to max 
batch size\n if q_dict_out['real_len'] :\n print('The end batch real size is:{}'.format(q_dict_out['real_len']))\n if (not q_dict_out['real_len']==batch_size):\n for _ in range(batch_size-q_dict_out['real_len']):\n batch_images.append(empty_frame)\n batch_indecies.append(i_sample)\n batch_pathes.append(path_img)\n\n q_dict_out['flag_start']=(nb_samples==1)\n q_dict_out['batch_src_size']=[img.shape[:2][::-1] for img in batch_images]\n q_dict_out['batch_images']=batch_images\n q_dict_out['batch_indecies']=batch_indecies\n q_dict_out['batch_pathes']=batch_pathes\n \n yield q_dict_out\n\n" ]
[ [ "numpy.ascontiguousarray", "numpy.dot" ], [ "numpy.random.shuffle", "numpy.ones" ] ]
abhinav-chinta/Class-12-comp_project
[ "47f7b5d3ce59517031864f554f19cc816f4e2400" ]
[ "ColordetectionFINAL.py" ]
[ "import numpy as np \r\nimport cv2 \r\n\r\nimport pyautogui\r\n \r\n \r\nwebcam = cv2.VideoCapture(0) \r\n \r\nwhile(1): \r\n \r\n # Reading the video from the \r\n # webcam in image frames \r\n _, imageFrame = webcam.read() \r\n \r\n # Convert the imageFrame in \r\n # BGR(RGB color space) to \r\n # HSV(hue-saturation-value) \r\n # color space \r\n hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV) \r\n \r\n # Set range for red color \r\n \r\n #red_lower = np.array([136, 87, 111], np.uint8) \r\n #red_upper = np.array([180, 255, 255], np.uint8)\r\n red_lower = np.array([140, 90, 111], np.uint8) \r\n red_upper = np.array([180, 255, 255], np.uint8) \r\n red_mask = cv2.inRange(hsvFrame, red_lower, red_upper) \r\n\r\n kernal = np.ones((5, 5), \"uint8\") \r\n \r\n # For red color \r\n red_mask = cv2.dilate(red_mask, kernal) \r\n res_red = cv2.bitwise_and(imageFrame, imageFrame, \r\n mask = red_mask) \r\n \r\n\r\n \r\n # Creating contour to track red color \r\n contours, hierarchy = cv2.findContours(red_mask, \r\n cv2.RETR_TREE, \r\n cv2.CHAIN_APPROX_SIMPLE) \r\n \r\n for pic, contour in enumerate(contours): \r\n area = cv2.contourArea(contour) \r\n if(area > 300): \r\n \r\n x, y, w, h = cv2.boundingRect(contour) \r\n imageFrame = cv2.rectangle(imageFrame, (x, y), \r\n (x + 100, y + 100), \r\n (0, 0, 255), 2) \r\n \r\n cv2.putText(imageFrame, \"Red Colour\", (x, y), \r\n cv2.FONT_HERSHEY_SIMPLEX, 1.0, \r\n (0, 0, 255)) \r\n \r\n pyautogui.moveTo(x+10,y-10)\r\n \r\n # Program Termination \r\n cv2.imshow(\"Multiple Color Detection in Real-TIme\", imageFrame)\r\n \r\n if cv2.waitKey(10) & 0xFF == ord('q'): \r\n cap.release() \r\n cv2.destroyAllWindows() \r\n break\r\n" ]
[ [ "numpy.array", "numpy.ones" ] ]
huangzongheng/NAMA
[ "e9bc5b9ca0c1dd5fff2f0613fdaac9fc5b038152" ]
[ "data/datasets/market1501.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: sherlock\n@contact: [email protected]\n\"\"\"\n\nimport glob\nimport re\nimport os\nimport mat4py\nimport numpy as np\n\nimport os.path as osp\n\nfrom .bases import BaseImageDataset\n\n\nMARKET_ATTR_PARTS = {\n 'head':['hair',\n 'hat',],\n 'up':['upblack',\n 'upblue',\n 'upgreen',\n 'upgray',\n 'uppurple',\n 'upred',\n 'upwhite',\n 'upyellow',],\n 'arm':['up',],\n 'down':['downblack',\n 'downblue',\n 'downbrown',\n 'downgray',\n 'downgreen',\n 'downpink',\n 'downpurple',\n 'downwhite',\n 'downyellow',],\n 'leg':['down','clothes',],\n # 'shoe':[],\n 'bag':['backpack', 'bag', 'handbag',],\n 'global':['age',\n 'gender',]\n}\n\nclass Market1501(BaseImageDataset):\n \"\"\"\n Market1501\n Reference:\n Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.\n URL: http://www.liangzheng.org/Project/project_reid.html\n\n Dataset statistics:\n # identities: 1501 (+1 for background)\n # images: 12936 (train) + 3368 (query) + 15913 (gallery)\n \"\"\"\n dataset_dir = 'Market-1501-v15.09.15'\n\n def __init__(self, root='/home/haoluo/data', verbose=True, use_attr=False, combine_all=False, **kwargs):\n super(Market1501, self).__init__()\n self.dataset_dir = osp.join(root, self.dataset_dir)\n self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')\n self.query_dir = osp.join(self.dataset_dir, 'query')\n self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')\n\n self._check_before_run()\n\n if combine_all:\n train = self._process_dir([self.train_dir, self.query_dir, self.gallery_dir], relabel=True)\n else:\n train = self._process_dir(self.train_dir, relabel=True)\n\n # train = self._process_dir(self.train_dir, relabel=True)\n query = self._process_dir(self.query_dir, relabel=False)\n gallery = self._process_dir(self.gallery_dir, relabel=False)\n\n if verbose:\n print(\"=> Market1501 loaded\")\n self.print_dataset_statistics(train, query, gallery)\n\n self.train = train\n self.query = query\n self.gallery = gallery\n # self.train = train[:64]\n # self.query = query[-16:]\n # self.gallery = gallery[-64:]\n\n self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)\n self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)\n self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)\n if use_attr:\n attrs = mat4py.loadmat(os.path.join(self.dataset_dir,'attribute','market_attribute.mat'))['market_attribute']\n attr_train = attrs['train']\n attr_test = attrs['test']\n for age in (1, 2, 3, 4): # 将年龄展开为4个二分类问题\n attr_train['age'+str(age)] = [2 if i == age else 1 for i in attr_train['age']]\n attr_train.pop('age')\n # attr_train['age0'] = [2 if i == age else 1 for i in attr_train['age']]\n\n\n cls = set(attr_train.keys())\n cls.discard('image_index')\n cls = list(cls)\n cls.sort()\n\n c = np.zeros((len(attr_train['image_index']), len(cls)+1)) # +1 for id label\n for i in range(len(cls)):\n c[:, i] = attr_train[cls[i]]\n c[:, -1] = np.arange(c.shape[0]) + 1\n self.train_attr = c.astype(np.int64) - 1\n self.classes_attr = cls\n self.train = [[i[0], i[1], self.train_attr[i[1]]] for i in self.train]\n\n\n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not 
available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))\n\n def _process_dir(self, dir_path, relabel=False):\n if isinstance(dir_path, str):\n img_paths = glob.glob(osp.join(dir_path, '*.jpg'))\n else:\n img_paths=[]\n for d in dir_path:\n img_paths += glob.glob(osp.join(d, '*.jpg'))\n pattern = re.compile(r'([-\\d]+)_c(\\d)')\n\n pid_container = set()\n for img_path in img_paths:\n pid, _ = map(int, pattern.search(img_path).groups())\n if pid == -1: continue # junk images are just ignored\n pid_container.add(pid)\n pid2label = {pid: label for label, pid in enumerate(pid_container)}\n\n dataset = []\n for img_path in img_paths:\n pid, camid = map(int, pattern.search(img_path).groups())\n if pid == -1: continue # junk images are just ignored\n assert 0 <= pid <= 1501 # pid == 0 means background\n assert 1 <= camid <= 6\n camid -= 1 # index starts from 0\n if relabel:\n if pid < 0:\n continue\n pid = pid2label[pid]\n # if relabel: pid = self.train_attr[pid] # use attr & id label\n dataset.append((img_path, pid, camid))\n\n return dataset\n\n\nclass Market1501Partial(Market1501):\n dataset_dir = 'Market-1501-Partial-Head'\n # dataset_dir = 'Market-1501-Partial'\n\n" ]
[ [ "numpy.arange" ] ]
celiagg/celia
[ "2eff50ab901c297e6f6e8491ddba997e325ffcb9" ]
[ "celiagg/tests/test_no_text.py" ]
[ "# The MIT License (MIT)\n#\n# Copyright (c) 2016-2021 Celiagg Contributors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Authors: John Wiggins\nimport unittest\n\nimport numpy as np\n\nimport celiagg as agg\n\n\[email protected](agg.HAS_TEXT, 'Text support is available')\nclass TestNoTextSupport(unittest.TestCase):\n def test_no_text_draw_text_failure(self):\n buffer = np.zeros((1, 1), dtype=np.uint8)\n canvas = agg.CanvasG8(buffer)\n transform = agg.Transform()\n line_paint = agg.SolidPaint(1.0, 1.0, 1.0)\n gs = agg.GraphicsState()\n\n with self.assertRaises(RuntimeError):\n canvas.draw_text('', None, transform, line_paint, line_paint, gs)\n" ]
[ [ "numpy.zeros" ] ]
ronger4242/DisasterPipeline
[ "49bf9851a3207cbb005c55ed7cfaa05c4f002756" ]
[ "data/process_data.py" ]
[ "import sys\nimport pandas as pd\nimport numpy as np\nimport sqlite3\nimport re\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n '''\n Load two CSV files\n Args:\n message_filepath = path to the message file\n categories_filepath = path to the categories file\n Return df: a merged pandas dataframe contains messages and categories\n '''\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = messages.merge(categories, on = 'id')\n return df\n\n\ndef clean_data(df):\n '''\n Cleans the dataframe for machine learning pipeline\n Args: \n df: a dirty dataframe\n Returns:\n df: a tidy dataframe\n '''\n categories = df['categories'].str.split(';', expand = True)\n row = categories.iloc[0]\n category_colnames = row.apply(lambda x: x[:-2])\n categories.columns = category_colnames\n for column in categories:\n categories[column] = categories[column].str.split('-').str.get(1)\n categories[column] = pd.to_numeric(categories[column])\n categories[column] = categories[column].astype('int')\n df = df.drop('categories', axis = 1)\n df = pd.concat([df,categories], axis = 1)\n df = df.drop_duplicates()\n return df\n\ndef save_data(df, database_filename):\n '''\n save the clean dataset into a sqlite database.\n Args:\n df: the pandas dataframe to be saved\n database_filename: the name for the sqlite database file\n '''\n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql('DisasterData', con = engine, if_exists = 'replace')\n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.to_numeric" ] ]
YangRui2015/Sparse-Reward-Algorithms
[ "946656bade883565fc4e9248b9579df3d11b7358" ]
[ "agent_env_params.py" ]
[ "import numpy as np \r\n\r\ndef design_agent_and_env(FLAGS):\r\n\r\n # environment params\r\n env_params = {}\r\n if FLAGS.env == \"reach\":\r\n env_params[\"env_name\"] = \"FetchReach-v1\"\r\n env_params[\"has_object\"] = False\r\n FLAGS.total_steps = 20\r\n else:\r\n raise TypeError(\"No such environment till now\")\r\n\r\n # number of actions to achieve subgoals in HDDPG\r\n x = pow(FLAGS.total_steps, 1/FLAGS.layers)\r\n if x - int(x) == 0:\r\n FLAGS.time_scale = int(x)\r\n else:\r\n FLAGS.time_scale = int(x) + 1 \r\n\r\n FLAGS.num_exploration_episodes = 100\r\n FLAGS.num_test_episodes = 100 \r\n FLAGS.num_epochs = FLAGS.episodes // FLAGS.num_exploration_episodes\r\n\r\n env_params[\"obj_range\"] = 0.15 \r\n env_params[\"target_range\"] = 0.15 \r\n env_params[\"max_actions\"] = FLAGS.total_steps \r\n\r\n distance_threshold = 0.05 # 5cm\r\n env_params[\"end_goal_thresholds\"] = distance_threshold\r\n env_params[\"subgoal_thresholds\"] = distance_threshold\r\n\r\n if FLAGS.curriculum >= 2:\r\n range_lis = list(np.linspace(0.05, 0.15, FLAGS.curriculum))\r\n env_params['curriculum_list'] = range_lis\r\n\r\n # agent params\r\n agent_params = {}\r\n agent_params[\"subgoal_test_perc\"] = 0.3\r\n\r\n agent_params[\"subgoal_penalty\"] = -FLAGS.time_scale # Define subgoal penalty for missing subgoal.\r\n agent_params[\"atomic_noise\"] = 0.1\r\n agent_params[\"subgoal_noise\"] = 0.03\r\n agent_params[\"epsilon\"] = 0.1 # rate of choose random action\r\n\r\n agent_params[\"episodes_to_store\"] = 1000\r\n agent_params[\"update_times\"] = 40 \r\n agent_params[\"batch_size\"] = 64 \r\n \r\n agent_params['imit_batch_size'] = 32\r\n agent_params['imit_ratio'] = FLAGS.imit_ratio\r\n\r\n return agent_params, env_params\r\n" ]
[ [ "numpy.linspace" ] ]
pgfeldman/optevolver
[ "cdb15a8cf40749e20a3bcb72b87a46f269d3eaf5" ]
[ "optevolver/hyperparameter/TF2OptimizerTestBase.py" ]
[ "import os\nfrom typing import Dict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport time\nfrom tensorflow.keras import layers\n\nimport optevolver.generators.FloatFunctions as FF\n\n\nclass TF2OptimizationTestBase:\n \"\"\"\n A base class that provides the framework for evolutionary optimization of architecture and hyperparameter space\n using Tensorflow 2.0 Keras.\n\n ...\n\n Attributes\n ----------\n sequence_length: int\n The size of the input and output vector. This assumes a sequence to sequence mapping where the\n sequences are the same length\n model: tf.keras.Sequential\n The default Tensorflow model\n full_mat: np.ndarray\n The full matrix of test/train values. Split into train_mat and test_mat\n train_mat: np.ndarray\n The rows of the full_mat that are used to train the model\n test_mat: np.ndarray\n The rows of the full_mat that are used to evaluate the fitness of the model while it's training\n predict_mat: np.ndarray\n A new matrix that the behavior of the trained model can be evaluated against\n model_args: Dict\n A Dict that is used to store the arguments used for model architecture and hyperparameters for each\n evaluation\n strategy: tf.distribute.OneDeviceStrategy\n The mechanism that TF uses to allocate models across multiple processors\n device: str\n The device that the model is running on\n\n Methods\n -------\n reset()\n Resets all the variables. Needed to eliminate class cross-contamination of class-global variables\n generate_train_test() -> (np.ndarray, np.ndarray, np.ndarray):\n Generates a set of test and train data for sequence creation\n build_model()\n Overridable method for the construction of the keras model. The default method creates a MLP model with\n variable neurons and layers based on the arguments passed in as a Dict\n evaluate model()\n Overridable method for the training and evaluation of a keras model. The default method varies batch size, and\n epochs based on the arguments passed in as a Dict\n plot_population()\n The normal use case for this class is to produce an ensemble of models that are stored in a directory. This\n method reads in each of the models and evaluates them against new data. Input, target, and predictions are\n generated for the ensembles, whch are plotted using pyplot\n plot_all()\n A convenience method that plots the full_mat, train_mat, test_mat, and predict_mat\n plot_mats()\n Plots a matrix with some adjustable display parameters\n\n\n \"\"\"\n sequence_length: int\n model: tf.keras.Sequential\n full_mat: np.ndarray\n train_mat: np.ndarray\n test_mat: np.ndarray\n predict_mat: np.ndarray\n model_args: Dict\n strategy: tf.distribute.OneDeviceStrategy\n device: str\n\n def __init__(self, sequence_length: int = 100, device: str = \"gpu:0\"):\n \"\"\"\n Parameters\n ----------\n sequence_length: int = 100\n The sice of the input and output vectors. Defaults to 100\n device: str = \"gpu:0\"\n The string used to set the Tensorflow distribution strategy. If, for example, the machine that this\n was running on had four cpus, an instance of this class would be created for 'cpu:0', 'cpu:1', 'cpu:2', and\n 'cpu:3'. Default is 'gpu:0'\n \"\"\"\n self.reset()\n self.sequence_length = sequence_length\n self.strategy = tf.distribute.OneDeviceStrategy(device=\"/{}\".format(device))\n self.device = device\n\n def reset(self):\n \"\"\"Resets all the variables. 
Needed to eliminate class cross-contamination of class-global variables\"\"\"\n self.sequence_length = 0\n self.model_args = {}\n self.model: tf.keras.Sequential = None\n self.full_mat: np.ndarray = None\n self.train_mat: np.ndarray = None\n self.test_mat: np.ndarray = None\n self.predict_mat: np.ndarray = None\n self.noise = 0\n self.strategy = None\n self.device = \"gpu:0\"\n\n def generate_train_test(self, num_functions: int, rows_per_function: int, noise: float) -> \\\n (np.ndarray, np.ndarray, np.ndarray):\n \"\"\"\n Method that generates data so that you don't have to find data. You can optionally set the mat values directly.\n The values are set using a set of frequencies for a sin function. All sequences start at sin(0) = 0, then\n diverge from there. Returns a tuple of (full_mat, train_mat, text mat), where the test and train matrices are\n made of the evenly split full matrix.\n\n Parameters\n ----------\n num_functions: int\n the number of sin-based functions to create\n rows_per_function: int\n the number of rows to create for each sin function. With noise == 0, these rows would be identical\n noise: float\n the amount of noise to add to each generated value\n \"\"\"\n\n # create an instance of the class that will generate our values. Note that rows are twice the length of our sequence\n ff = FF.FloatFunctions(rows_per_function, 2 * self.sequence_length)\n npa = None\n for i in range(num_functions):\n mathstr = \"math.sin(xx*{})\".format(0.005 * (i + 1))\n # generate a dataframe that with values\n df2 = ff.generateDataFrame(mathstr, noise=noise)\n # convert the DataFrame to an Numpy array\n npa2 = df2.to_numpy()\n # if this is the first time, set the initial value, otherwise append\n if npa is None:\n npa = npa2\n else:\n ta = np.append(npa, npa2, axis=0)\n npa = ta\n\n # split into the train and test matrices\n split = np.hsplit(npa, 2)\n\n # return the tuple\n return npa, split[0], split[1]\n\n\n def build_model(self, args: Dict) -> tf.keras.Sequential:\n \"\"\"\n Overridable method that builds a tf.keras.Sequential model and returns it. All arguments are passed in the form\n of a Dict, so that there can be any amount of paramaters used to produce the model. In this example case, a\n multilayer perceptron is built using a Dict that would contain {'num_neurons' : x, 'num_layers' : y}. The model\n is defined to run on the device passed in (or the default of 'gpu:0') to the constructor. Returns an executable\n model with input and output size of self.sequence_length\n\n Parameters\n ----------\n num_functions: int\n the number of sin-based functions to create\n rows_per_function: int\n the number of rows to create for each sin function. 
With noise == 0, these rows would be identical\n noise: float\n the amount of noise to add to each generated value\n \"\"\"\n with self.strategy.scope():\n # make a copy of the args that we can manipulate\n self.model_args = args.copy()\n\n # some defaults\n num_layers = 1\n num_neurons = 200\n\n #change the defaults if there are args in the Dict to do so\n if 'num_neurons' in self.model_args:\n num_neurons = self.model_args['num_neurons']\n if 'num_layers' in self.model_args:\n num_layers = self.model_args['num_layers']\n\n # create the model\n self.model = tf.keras.Sequential()\n\n # Add the input layer with sequence_length units to the model\n self.model.add(layers.Dense(self.sequence_length, activation='relu', input_shape=(self.sequence_length,)))\n # Add the number of layers and neurons as specified in the args\n for i in range(num_layers):\n self.model.add(layers.Dense(num_neurons, activation='relu'))\n # Add the output layer with sequence_length units to the model\n self.model.add(layers.Dense(self.sequence_length))\n\n # set up our loss and optimization functions. These could be passed in as well\n loss_func = tf.keras.losses.MeanSquaredError()\n opt_func = tf.keras.optimizers.Adam(0.01)\n\n # create the model\n self.model.compile(optimizer=opt_func,\n loss=loss_func,\n metrics=['accuracy'])\n #return the model\n return self.model\n\n def evaluate_model(self, num_functions: int = 10, rows_per_function: int = 1, noise: float = 0) -> Dict:\n \"\"\"\n Overridable method that evaluates a tf.keras model and returns a dictionary containing metrics\n (loss, accuracy, duration) about that run\n run.\n\n Parameters\n ----------\n num_functions: int\n the number of sin-based functions to create that we will evaluate against\n rows_per_function: int\n the number of rows to create for each sin function. 
With noise == 0, these rows would be identical\n noise: float\n the amount of noise to add to each generated value\n \"\"\"\n\n # set the defaults\n self.noise = noise\n epochs = 40\n batch_size = 2\n\n # update the defaults if 'epochs' or 'batch_size' is in the self.model_args Dict that was passed into\n # build_model()\n if 'epochs' in self.model_args:\n epochs = self.model_args['epochs']\n if 'batch_size' in self.model_args:\n batch_size = self.model_args['batch_size']\n\n # generate the full, train, and test matrices\n self.full_mat, self.train_mat, self.test_mat = self.generate_train_test(num_functions, rows_per_function, noise)\n\n # set up values that we will use in the evaluation\n results_dict = {}\n start = time.time()\n\n # use the target processor\n with self.strategy.scope():\n # fit and evaluate the model, getting the list of results back\n self.model.fit(self.train_mat, self.test_mat, epochs=epochs, batch_size=batch_size)\n result_list = self.model.evaluate(self.train_mat, self.test_mat)\n # build our return values\n stop = time.time()\n loss = result_list[0]\n accuracy = result_list[1]\n duration = stop - start\n # print (helps to see what's going on)\n print(\"{}: loss = {:.4f}, accuracy = {:.4f}, duration = {:.4f} seconds\".format(self.device, loss, accuracy, duration))\n # build the results Dict\n results_dict = {'loss': loss, 'accuracy': accuracy, 'duration': duration}\n\n # generate some data for testing\n self.full_mat, self.train_mat, self.test_mat = self.generate_train_test(rows_per_function, rows_per_function, noise)\n\n # calculate the predicted values and save them\n self.predict_mat = self.model.predict(self.train_mat)\n\n # return the results\n return results_dict\n\n def plot_population(self, dirname: str, num_functions: int = 10, rows_per_function: int = 1, noise: float = 0):\n \"\"\"\n loads an ensemble of models and plots their predictions against input values using pyplot\n\n Parameters\n ----------\n dirname: str\n the name of the root directory that contains the TF2.0 models\n num_functions: int\n the number of sin-based functions to create that we will evaluate against\n rows_per_function: int\n the number of rows to create for each sin function. With noise == 0, these rows would be identical\n noise: float\n the amount of noise to add to each generated value\n \"\"\"\n\n # generate a new full, train, and test matrices\n self.full_mat, self.train_mat, self.test_mat = self.generate_train_test(num_functions, rows_per_function, noise)\n\n # create the matrix that will store the ensemble values for plotting\n avg_mat = np.zeros(self.test_mat.shape)\n\n # change to the directory contiaing the models\n d = os.getcwd()\n os.chdir(dirname)\n # iterate over all the child directories\n with os.scandir() as entries:\n count = 1\n for entry in entries:\n #don't do anything if the entry is not a directory\n if entry.is_file() or entry.is_symlink():\n os.remove(entry.path)\n elif entry.is_dir():\n count += 1\n print(\"loading: {}\".format(entry.name))\n # load the model\n new_model = tf.keras.models.load_model(entry.name)\n #generate a prediction matrix\n self.predict_mat = new_model.predict(self.train_mat)\n # add these values to the target matrix\n avg_mat = np.add(self.predict_mat, avg_mat)\n # add the predict values to the \"All predictions\" chart. 
This will give us multiple overlayed lines\n self.plot_mats(self.predict_mat, rows_per_function, \"All Predictions\", 0)\n # plot the train and test matrices\n self.plot_mats(self.train_mat, rows_per_function, \"Training Set\", 1)\n self.plot_mats(self.test_mat, rows_per_function, \"Ground Truth\", 2)\n\n # normalize the target matrix we've been summing our values to\n avg_mat = avg_mat / count\n # plot the average of the ensemble\n self.plot_mats(avg_mat, rows_per_function, \"Ensemble Average\", 3)\n # show the plots\n plt.show()\n # change back\n os.chdir(d)\n\n def plot_all(self, pop_size:int=10):\n \"\"\"\n Plot the values of this class's matrices\n\n Parameters\n ----------\n pop_size: int = 10\n the number of functions that are being used to evaluate this model\n \"\"\"\n self.plot_mats(self.full_mat, pop_size, \"Full Data\", 1)\n self.plot_mats(self.train_mat, pop_size, \"Input Vector\", 2)\n self.plot_mats(self.test_mat, pop_size, \"Output Vector\", 3)\n self.plot_mats(self.predict_mat, pop_size, \"Predict\", 4)\n plt.show()\n\n def plot_mats(self, mat: np.ndarray, cluster_size: int, title: str, fig_num: int, linestyle:str='solid', marker:str='None'):\n \"\"\"\n Plots a matrix with some adjustable display parameters\n\n Parameters\n ----------\n mat: np.ndarray\n the matrix we are plotting\n cluster_size: int\n the size of the 'color cluster' in this class, this is usually the rows_per_function argument\n title: str\n the title of the chart\n fig_num: int\n the pyplot figure number\n linestyle:str='solid' (see https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html)\n the linestyle. Options include solid, dotted, dashed, and dashdot\n marker:str='None' (see matplotlib.org/3.1.1/api/markers_api.html)\n the line marker style. Options include ',' (point), 'o' (circle), 'v' (triangle), 's' (square)\n\n \"\"\"\n\n if title != None:\n plt.figure(fig_num)\n\n i = 0\n for row in mat:\n cstr = \"C{}\".format(int(i / cluster_size))\n plt.plot(row, color=cstr, linestyle=linestyle, marker=marker)\n i += 1\n\n if title != None:\n plt.title(title)\n\n# Exercise this class by warning that it needs a subclass\nif __name__ == \"__main__\":\n print(\"TF2OptimizerTestBase needs a subclass...\")\n" ]
[ [ "tensorflow.keras.models.load_model", "matplotlib.pyplot.title", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.keras.layers.Dense", "tensorflow.keras.Sequential", "numpy.hsplit", "matplotlib.pyplot.plot", "numpy.append", "tensorflow.keras.optimizers.Adam", "numpy.add", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
gwthmsb/MLAndAI
[ "cb255d9bd480f6f25bf868e831a68257faecbfcc" ]
[ "LDA.py" ]
[ "from numpy import mean\nfrom numpy import std\nfrom sklearn.datasets import make_classification, load_iris\nfrom sklearn.model_selection import cross_val_score, GridSearchCV\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.pipeline import Pipeline\n\n\"\"\"\n LinearDiscriminantAnalysis is classifier not an reducer. \n\"\"\"\n\n\ndef check_model(X, y, model):\n cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\n scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n print(scores)\n print(\"Mean accuracy: %.3f, SD: %.3f\"%(mean(scores), std(scores)))\n\n\ndef simple_prediction(X, y, model):\n model.fit(X, y)\n row = [0,0,-2.23268854,-1.82114386,1.75466361,0.1243966,1.03397657,2.35822076,4.44235,0.56768485]\n # make a prediction\n yhat = model.predict([row])\n # summarize prediction\n print('Predicted Class: %d' % yhat)\n\n\ndef hyper_parameters(X, y, model, solver):\n \"\"\"\n Hyper parameters should be configured for LDA.\n Importanta HP is solver, by default it is \"svd\"\n \"\"\"\n cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\n grid = {\"solver\": [solver,]}\n search_model = GridSearchCV(model, grid, scoring='accuracy', cv=cv, n_jobs=-1)\n results = search_model.fit(X, y)\n print(\"Solver: {}\".format(results.best_params_))\n print(\"Accuracy: %.8f\"%results.best_score_)\n return results\n\n\ndef iris_data():\n # define dataset\n #X, y = make_classification(n_samples=1000, n_features=10, n_informative=10, n_redundant=0, random_state=1)\n X, y = load_iris(True)\n print(X, y)\n # define model\n model = LinearDiscriminantAnalysis()\n print(model.get_params(True))\n #simple_prediction(X, y, model)\n solvers = [\"svd\", \"lsqr\", \"eigen\"]\n row = [0.1, 3.5, 4.2, 100]\n for solver in solvers:\n result = hyper_parameters(X, y, model, solver)\n pr_class = result.predict([[10, 25, 30, 40]])\n print(pr_class)\n\n\ndef iris_data_reduction():\n X, y = load_iris(True)\n model = LinearDiscriminantAnalysis(n_components=2)\n model.fit(X, y)\n X_trans = model.transform(X)\n print(X_trans)\n model.fit(X_trans, y)\n print(model.predict([[0, 0]]))\n print(model.predict([[100, 100]]))\n print(model.predict([[-100, -100]]))\n\n\ndef prediction():\n X, y = make_classification(n_samples=1000, n_features=10, n_informative=10, n_redundant=0, random_state=1)\n model = LinearDiscriminantAnalysis()\n simple_prediction(X, y, model)\n\n\ndef LDA_as_reduction():\n X, y = make_classification(n_samples=10, n_features=6, n_informative=6, n_redundant=0, random_state=2, n_classes=3)\n model = LinearDiscriminantAnalysis(n_components=2)\n print(X, y)\n print(model.get_params(True))\n model.fit(X, y)\n print(model.predict([[2, 4, 5, -1, 0, 4]]))\n X_trans = model.transform(X)\n print(X_trans)\n model.fit(X_trans, y)\n print(model.predict([[2, 4]]))\n print(model.predict([[-3, 5]]))\n\n\ndef LDA_as_reduction_using_pipeline():\n steps = [(\"lda\", LinearDiscriminantAnalysis()), (\"m\", GaussianNB())]\n model = Pipeline(steps)\n X, y = make_classification(n_samples=1000, n_features=20, n_informative=15, n_redundant=0, random_state=2, n_classes=11)\n model.fit(X, y)\n cv = RepeatedStratifiedKFold(n_repeats=3, n_splits=3, random_state=1)\n\n result = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n print(mean(result), std(result))\n # 0.353323383263503 0.023301238927184588\n\n # LDA feature reduction 
from 20 to 10\n\n steps = [(\"lda\", LinearDiscriminantAnalysis(n_components=9)), ('m', GaussianNB())]\n model = Pipeline(steps)\n model.fit(X, y)\n result = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n print(mean(result), std(result))\n # 0.34565903228577877 0.015750493488623934\n\n \"\"\"\n Here accuracy mean is just 0.3456, ie this model is not so great for current data set.\n \"\"\"\n\n\nif __name__ == \"__main__\":\n #prediction()\n #iris_data()\n #iris_data_reduction()\n LDA_as_reduction_using_pipeline()" ]
[ [ "sklearn.model_selection.GridSearchCV", "sklearn.model_selection.cross_val_score", "sklearn.datasets.make_classification", "sklearn.naive_bayes.GaussianNB", "sklearn.datasets.load_iris", "sklearn.pipeline.Pipeline", "numpy.std", "sklearn.model_selection.RepeatedStratifiedKFold", "numpy.mean", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis" ] ]
bwoodsend/hoatzin
[ "22355d2f6ba69484e73766fc5e4f62d29ea53e3a" ]
[ "tests/__init__.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nimport functools\nimport warnings\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom hirola import exceptions\n\n\ndef random_ids(max, count, at_least_once=True, sort=False):\n if at_least_once:\n assert count >= max\n out = np.append(np.arange(max, dtype=np.intp),\n np.random.randint(0, max, count - max, np.intp))\n else:\n out = np.random.randint(0, max, count, np.intp)\n if sort:\n out.sort()\n else:\n np.random.shuffle(out)\n return out\n\n\ndef ignore_almost_full_warnings(test):\n \"\"\"Decorate a test to disable exceptions.AlmostFull warnings.\"\"\"\n\n @functools.wraps(test)\n def wrapped(*args, **kwargs):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=exceptions.AlmostFull)\n test(*args, **kwargs)\n\n return wrapped\n\n\n@contextmanager\ndef warnings_as_errors():\n \"\"\"A context manager which treats all warnings as errors.\"\"\"\n with warnings.catch_warnings():\n warnings.filterwarnings(\"error\")\n yield\n" ]
[ [ "numpy.arange", "numpy.random.shuffle", "numpy.random.randint" ] ]
philip-papasavvas/PythonSkills
[ "7744779314245f23065a36ba0318e21f83b1019b" ]
[ "src/utils_dataframe.py" ]
[ "\"\"\"\nCreated on: 26 Feb 2021\nUtils module for comparing pd DataFrames\n\"\"\"\nimport re\nfrom typing import Union, List\n\nimport numpy as np\nimport pandas as pd\n\n\ndef get_selected_column_names(df: pd.DataFrame,\n cols_to_exclude: Union[List[str], str]) -> List[str]:\n \"\"\"Return a list with all the column names in the data frame except those specified\"\"\"\n return [x for x in list(df.columns) if x not in cols_to_exclude]\n\n\ndef compare_dataframe_col(df_one: pd.DataFrame,\n df_two: pd.DataFrame,\n index_col: str,\n merge_col: str,\n suffixes: tuple = ('_x', '_y')) -> pd.DataFrame:\n \"\"\"\n Compare two dataframes, specifically for a column choose the common index.\n Percentage difference will be relative to the first suffix dataframe\n\n Args:\n df_one: first dataframe to compare\n df_two: second dataframe to compare\n index_col: common column (in both data frames) on which to use as the 'index'\n merge_col: common column on which to carry out the merge, and compute the differences\n suffixes: specifed to give detail to different data frames compared\n\n Returns:\n pd.DataFrame: A dataframe with columns:\n [index, merge_col_first_suffix, merge_col_second_suffix, absolute_diff, pc_diff]\n Note: the percentage diff is given in decimals. 2% = 0.02\n \"\"\"\n print(f\"Performing data frame compare with index: \\t {index_col}. \\n\"\n f\"Merge column: \\t {merge_col} \")\n\n merged_df = pd.merge(\n left=df_one.set_index(index_col)[[merge_col]],\n right=df_two.set_index(index_col)[[merge_col]],\n left_on=index_col,\n right_on=index_col,\n suffixes=suffixes,\n how='outer'\n ).fillna(0)\n\n merged_df['absolute_diff'] = np.abs(merged_df[merge_col + suffixes[0]].values -\n merged_df[merge_col + suffixes[1]].values)\n merged_df['pc_diff'] = \\\n np.abs(np.abs(merged_df['absolute_diff']) / np.abs(merged_df[merge_col + suffixes[0]]))\n\n return merged_df\n\n\ndef reconcile_dataframes_numeric(df_one: pd.DataFrame,\n df_two: pd.DataFrame,\n tolerance: float = 1E-12) -> pd.DataFrame:\n \"\"\"Method to reconcile two dataframes. This is different to\n pd.testing.assert_frame_equal since it allows the user to set a tolerance\n the difference between the array values.\n It will check that the index and columns are the same. 
If the columns are the\n same but ordered differently, it will sort them before the comparison takes place\n\n Args:\n df_one: pd.DataFrame\n df_two: pd.DataFrame\n tolerance: specify the tolerance between the values in the array\n\n Returns:\n pd.DataFrame: returns the difference between the two dataframes, with\n labelled columns\n \"\"\"\n assert isinstance(df_one, pd.DataFrame), 'df_one is not a dataframe'\n assert isinstance(df_two, pd.DataFrame), 'df_two is not a dataframe'\n assert all(df_one.index == df_two.index), 'indices do not match'\n assert df_one.shape == df_two.shape, 'shapes of the dataframes do not match'\n\n assert all(np.in1d(df_one.columns, df_two.columns)), 'column values do not match'\n\n compare_mat = df_two.loc[:, df_one.columns]\n\n if np.max(np.absolute(compare_mat.values - df_one.values)) < tolerance:\n print(\"Data frames reconcile\")\n else:\n print(\"Data frames did not reconcile\")\n\n return pd.DataFrame(np.absolute(compare_mat.values - df_one.values),\n columns=df_one.columns)\n\n\ndef drop_null_columns_df(data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Drop columns from the dataframe with null values\"\"\"\n original_columns = list(data.columns)\n cleaned_data = data.dropna(axis=1)\n new_columns = list(cleaned_data.columns)\n cut_columns = [x for x in original_columns if x not in new_columns]\n\n print(f\"Columns: {cut_columns} \\n have been dropped from the dataframe as they contain NaNs\")\n return cleaned_data\n\n\ndef replace_underscores_df(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Replace underscores in a pd.DataFrame\"\"\"\n return df.replace({\"_\": \"\"}, regex=True)\n\n\ndef concat_columns(sep: str = '', *args) -> pd.DataFrame:\n \"\"\"Concatenate multiple columns of pd.DataFrame with sep\"\"\"\n df = pd.DataFrame()\n for arg in args:\n df = pd.concat([df, arg], axis=1, ignore_index=True)\n try:\n out = df.astype(str).add(sep).sum(axis=1).str.replace(\n '%s+$' % re.escape(sep), '', regex=True) # removes trailing sep\n # need to make any columns with nan to output NaN, which is the result when 'A' + '_' +\n # 'NaN'\n mask = df.isnull().any(axis=1)\n out[mask] = np.nan\n except AttributeError:\n # incase of empty data frame\n out = pd.Series()\n return out\n\n\ndef return_reconciliation_summary_table(\n differences_df: pd.DataFrame,\n groupby_key: str) -> pd.DataFrame:\n \"\"\"\n Parse in the result of the compare_dataframe_col method, to give a summary\n of the comparison of the two dataframes. 
This is especially useful if you\n compare two very large dataframes, across multiple indices.\n If you wish to choose the index by which to group the results, this method\n outputs the mean absolute and percentage difference, as well as the\n maximum absolute and percentage difference\n\n Args:\n differences_df: This is the pd.DataFrame result of the compare_dataframe_col\n method, with the columns:\n [index_columns (fed into compare_dataframe_col, as a parameter)\n merge_column (suffix A)\n merge_column (suffix B)\n [both of the merge_columns are fed into compare_dataframe_col]\n ]\n groupby_key: key on which to group the summary of differences table\n\n Returns\n pd.DataFrame: Columns [groupby key, 'absolute_diff_mean',\n 'absolute_diff_max', 'pc_diff_mean', 'pc_diff_max']\n \"\"\"\n diff_abs_df = pd.merge(\n left=pd.DataFrame({'absolute_diff_mean':\n differences_df.groupby([groupby_key])['absolute_diff'].mean()}),\n right=pd.DataFrame({'absolute_diff_max':\n differences_df.groupby([groupby_key])['absolute_diff'].max()}),\n left_index=True, right_index=True\n )\n\n diffs_percent_diff = pd.merge(\n left=pd.DataFrame({'pc_diff_mean':\n differences_df.groupby([groupby_key])['pc_diff'].mean()}),\n right=pd.DataFrame({'pc_diff_max':\n differences_df.groupby([groupby_key])['pc_diff'].max()}),\n left_index=True, right_index=True\n )\n\n summary_diffs_df = pd.merge(left=diff_abs_df,\n right=diffs_percent_diff,\n left_index=True, right_index=True)\n\n return summary_diffs_df\n\n\nif __name__ == '__main__':\n sample_df = pd.DataFrame(\n {\"a\": [\"liquid\", \"arrogant\", \"imagine\", \"knock\", \"share\"],\n \"b\": range(5)})\n\n concat_columns('_', sample_df[['a', 'b']])\n" ]
[ [ "pandas.merge", "numpy.absolute", "pandas.concat", "numpy.abs", "pandas.Series", "numpy.in1d", "pandas.DataFrame" ] ]
sailist/detectron2
[ "2805d9eb0b2a97899ef187715cf8beb9fd7385fc" ]
[ "detectron2/modeling/mmdet_wrapper.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport itertools\nimport logging\nimport numpy as np\nfrom collections import OrderedDict\nfrom collections.abc import Mapping\nfrom typing import Dict, List, Optional, Tuple, Union\nimport torch\nfrom omegaconf import DictConfig, OmegaConf\nfrom torch import Tensor, nn\n\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.structures import BitMasks, Boxes, ImageList, Instances\nfrom detectron2.utils.events import get_event_storage\n\nfrom .backbone import Backbone\n\nlogger = logging.getLogger(__name__)\n\n\ndef _to_container(cfg):\n \"\"\"\n mmdet will assert the type of dict/list.\n So convert omegaconf objects to dict/list.\n \"\"\"\n if isinstance(cfg, DictConfig):\n cfg = OmegaConf.to_container(cfg, resolve=True)\n from mmcv.utils import ConfigDict\n\n return ConfigDict(cfg)\n\n\nclass MMDetBackbone(Backbone):\n \"\"\"\n Wrapper of mmdetection backbones to use in detectron2.\n\n mmdet backbones produce list/tuple of tensors, while detectron2 backbones\n produce a dict of tensors. This class wraps the given backbone to produce\n output in detectron2's convention, so it can be used in place of detectron2\n backbones.\n \"\"\"\n\n def __init__(\n self,\n backbone: Union[nn.Module, Mapping],\n neck: Union[nn.Module, Mapping, None] = None,\n *,\n pretrained_backbone: Optional[str] = None,\n output_shapes: List[ShapeSpec],\n output_names: Optional[List[str]] = None,\n ):\n \"\"\"\n Args:\n backbone: either a backbone module or a mmdet config dict that defines a\n backbone. The backbone takes a 4D image tensor and returns a\n sequence of tensors.\n neck: either a backbone module or a mmdet config dict that defines a\n neck. The neck takes outputs of backbone and returns a\n sequence of tensors. If None, no neck is used.\n pretrained_backbone: defines the backbone weights that can be loaded by\n mmdet, such as \"torchvision://resnet50\".\n output_shapes: shape for every output of the backbone (or neck, if given).\n stride and channels are often needed.\n output_names: names for every output of the backbone (or neck, if given).\n By default, will use \"out0\", \"out1\", ...\n \"\"\"\n super().__init__()\n if isinstance(backbone, Mapping):\n from mmdet.models import build_backbone\n\n backbone = build_backbone(_to_container(backbone))\n self.backbone = backbone\n\n if isinstance(neck, Mapping):\n from mmdet.models import build_neck\n\n neck = build_neck(_to_container(neck))\n self.neck = neck\n\n # It's confusing that backbone weights are given as a separate argument,\n # but \"neck\" weights, if any, are part of neck itself. This is the interface\n # of mmdet so we follow it. Reference:\n # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py\n logger.info(f\"Initializing mmdet backbone weights: {pretrained_backbone} ...\")\n self.backbone.init_weights(pretrained_backbone)\n # train() in mmdet modules is non-trivial, and has to be explicitly\n # called. 
Reference:\n # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py\n self.backbone.train()\n if self.neck is not None:\n logger.info(\"Initializing mmdet neck weights ...\")\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n self.neck.train()\n\n self._output_shapes = output_shapes\n if not output_names:\n output_names = [f\"out{i}\" for i in range(len(output_shapes))]\n self._output_names = output_names\n\n def forward(self, x) -> Dict[str, Tensor]:\n outs = self.backbone(x)\n if self.neck is not None:\n outs = self.neck(outs)\n assert isinstance(\n outs, (list, tuple)\n ), \"mmdet backbone should return a list/tuple of tensors!\"\n if len(outs) != len(self._output_shapes):\n raise ValueError(\n \"Length of output_shapes does not match outputs from the mmdet backbone: \"\n f\"{len(outs)} != {len(self._output_shapes)}\"\n )\n return {k: v for k, v in zip(self._output_names, outs)}\n\n def output_shape(self) -> Dict[str, ShapeSpec]:\n return {k: v for k, v in zip(self._output_names, self._output_shapes)}\n\n\nclass MMDetDetector(nn.Module):\n \"\"\"\n Wrapper of a mmdetection detector model, for detection and instance segmentation.\n Input/output formats of this class follow detectron2's convention, so a\n mmdetection model can be trained and evaluated in detectron2.\n \"\"\"\n\n def __init__(\n self,\n detector: Union[nn.Module, Mapping],\n *,\n # Default is 32 regardless of model:\n # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets\n size_divisibility=32,\n pixel_mean: Tuple[float],\n pixel_std: Tuple[float],\n ):\n \"\"\"\n Args:\n detector: a mmdet detector, or a mmdet config dict that defines a detector.\n size_divisibility: pad input images to multiple of this number\n pixel_mean: per-channel mean to normalize input image\n pixel_std: per-channel stddev to normalize input image\n \"\"\"\n super().__init__()\n if isinstance(detector, Mapping):\n from mmdet.models import build_detector\n\n detector = build_detector(_to_container(detector))\n self.detector = detector\n self.size_divisibility = size_divisibility\n\n self.register_buffer(\"pixel_mean\", torch.tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.tensor(pixel_std).view(-1, 1, 1), False)\n assert (\n self.pixel_mean.shape == self.pixel_std.shape\n ), f\"{self.pixel_mean} and {self.pixel_std} have different shapes!\"\n\n def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\n images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor\n metas = []\n rescale = {\"height\" in x for x in batched_inputs}\n if len(rescale) != 1:\n raise ValueError(\"Some inputs have original height/width, but some don't!\")\n rescale = list(rescale)[0]\n output_shapes = []\n for input in batched_inputs:\n meta = {}\n c, h, w = input[\"image\"].shape\n meta[\"img_shape\"] = meta[\"ori_shape\"] = (h, w, c)\n if rescale:\n scale_factor = np.sqrt(h * w / (input[\"height\"] * input[\"width\"]))\n ori_shape = (input[\"height\"], input[\"width\"])\n output_shapes.append(ori_shape)\n meta[\"ori_shape\"] = ori_shape + (c,)\n else:\n scale_factor = 1.0\n output_shapes.append((h, w))\n meta[\"scale_factor\"] = scale_factor\n meta[\"flip\"] = False\n padh, padw = images.shape[-2:]\n meta[\"pad_shape\"] = (padh, padw, c)\n 
metas.append(meta)\n\n if self.training:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n if gt_instances[0].has(\"gt_masks\"):\n from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks\n\n def convert_mask(m, shape):\n # mmdet mask format\n if isinstance(m, BitMasks):\n return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])\n else:\n return mm_PolygonMasks(m.polygons, shape[0], shape[1])\n\n gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]\n losses_and_metrics = self.detector.forward_train(\n images,\n metas,\n [x.gt_boxes.tensor for x in gt_instances],\n [x.gt_classes for x in gt_instances],\n gt_masks=gt_masks,\n )\n else:\n losses_and_metrics = self.detector.forward_train(\n images,\n metas,\n [x.gt_boxes.tensor for x in gt_instances],\n [x.gt_classes for x in gt_instances],\n )\n return _parse_losses(losses_and_metrics)\n else:\n results = self.detector.simple_test(images, metas, rescale=rescale)\n results = [\n {\"instances\": _convert_mmdet_result(r, shape)}\n for r, shape in zip(results, output_shapes)\n ]\n return results\n\n @property\n def device(self):\n return self.pixel_mean.device\n\n\n# Reference: show_result() in\n# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py\ndef _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:\n if isinstance(result, tuple):\n bbox_result, segm_result = result\n if isinstance(segm_result, tuple):\n segm_result = segm_result[0]\n else:\n bbox_result, segm_result = result, None\n\n bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5\n bboxes, scores = bboxes[:, :4], bboxes[:, -1]\n labels = [\n torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)\n ]\n labels = torch.cat(labels)\n inst = Instances(shape)\n inst.pred_boxes = Boxes(bboxes)\n inst.scores = scores\n inst.pred_classes = labels\n\n if segm_result is not None and len(labels) > 0:\n segm_result = list(itertools.chain(*segm_result))\n segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result]\n segm_result = torch.stack(segm_result, dim=0)\n inst.pred_masks = segm_result\n return inst\n\n\n# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py\ndef _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]:\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n else:\n raise TypeError(f\"{loss_name} is not a tensor or list of tensors\")\n\n if \"loss\" not in loss_name:\n # put metrics to storage; don't return them\n storage = get_event_storage()\n value = log_vars.pop(loss_name).cpu().item()\n storage.put_scalar(loss_name, value)\n return log_vars\n" ]
[ [ "numpy.sqrt", "torch.full", "torch.cat", "torch.from_numpy", "torch.tensor", "torch.stack", "numpy.vstack" ] ]
erichensleyibm/capstone_CitiBike
[ "74c8d968179c0f53d69bdbb92420b2948bd6adaa" ]
[ "get_bike_data.py" ]
[ "import os, sys\ntry: # if running in CLI\n cur_path = os.path.abspath(__file__)\n while cur_path.split('/')[-1] != 'capstone':\n cur_path = os.path.abspath(os.path.join(cur_path, os.pardir))\nexcept NameError: # if running in IDE\n cur_path = os.getcwd()\n while cur_path.split('/')[-1] != 'capstone':\n cur_path = os.path.abspath(os.path.join(cur_path, os.pardir))\n sys.path.insert(1, os.path.join(cur_path, 'lib', 'python3.6', 'site-packages'))\n\nimport requests, zipfile, io\nimport pandas as pd\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport datetime\nfrom lxml import html\n\nglobal DB_NAME\nDB_NAME = 'citibike'\n\ndef create_database(cursor):\n try:\n cursor.execute(\n \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf16'\".format(DB_NAME))\n except mysql.connector.Error as err:\n print(\"Failed creating database: {}\".format(err))\n exit(1)\n\ndef create_tables(cursor):\n TABLES = {}\n TABLES['stations'] = (\n \"CREATE TABLE `stations` (\"\n \" `station_id` int(5) NOT NULL,\"\n \" `station_name` varchar(60) NOT NULL,\" \n \" `latitude` FLOAT(10, 6) NOT NULL,\"\n \" `longitude` FLOAT(10, 6) NOT NULL,\"\n \" PRIMARY KEY (`station_id`)\"\n \") ENGINE=InnoDB\")\n\n TABLES['weather'] = (\n \"CREATE TABLE `weather` (\"\n \" `time_id` DATETIME NOT NULL UNIQUE,\"\n \" `temperature` FLOAT(4,1),\" \n \" `humidity` INT,\"\n \" `wind` FLOAT(3,1) NOT NULL,\" \n \" `precip` FLOAT(4,2) NOT NULL,\"\n \" `label` VARCHAR(10) NOT NULL,\"\n \" `condition` VARCHAR(25) NOT NULL,\"\n \" PRIMARY KEY (`time_id`)\"\n \") ENGINE=InnoDB\")\n \n TABLES['trips'] = (\n \"CREATE TABLE `trips` (\"\n \" `trip_id` int(11) NOT NULL AUTO_INCREMENT,\"\n \" `start` DATETIME NOT NULL,\" \n \" `end` DATETIME NOT NULL,\" \n \" `avg_trip` DATETIME NOT NULL,\"\n \" `duration` INT NOT NULL,\"\n \" `start_station` int(5) NOT NULL,\"\n \" `end_station` int(5) NOT NULL,\"\n \" `bike_id` int(5) NOT NULL,\"\n \" `user` TINYINT NOT NULL,\"\n \" `gender` TINYINT NOT NULL,\"\n \" `birth_year` INT,\"\n \" PRIMARY KEY (`trip_id`),\"\n \" CONSTRAINT `fk_start_station` FOREIGN KEY (`start_station`) \"\n \" REFERENCES `stations` (`station_id`) ON DELETE CASCADE \"\n \" ON UPDATE CASCADE, \"\n \" CONSTRAINT `fk_end_station` FOREIGN KEY (`end_station`) \"\n \" REFERENCES `stations` (`station_id`) ON DELETE CASCADE \"\n \" ON UPDATE CASCADE\"\n \") ENGINE=InnoDB\")\n \n for name, ddl in TABLES.items():\n try:\n print(\"Creating table {}: \".format(name), end='')\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\") \n \ndef set_mysql_env(cnx):\n cursor = cnx.cursor()\n try:\n cnx.database = DB_NAME\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1) \n create_tables(cursor)\n cursor.close()\n\ndef get_data(month, cache = False): \n month = str(month)\n if len(month) == 1:\n month = '0'+month\n if '2017%s-citibike-tripdata.csv' % (month) in os.listdir(os.path.join(cur_path, 'data')):\n month_data = pd.read_csv(os.path.join(cur_path, 'data', '2017%s-citibike-tripdata.csv' % (month)))\n else:\n zip_file_url = \"https://s3.amazonaws.com/tripdata/2017%s-citibike-tripdata.csv.zip\" % (month)\n r = None\n r = requests.get(zip_file_url)\n while not r.ok:\n r = requests.get(zip_file_url) \n z = zipfile.ZipFile(io.BytesIO(r.content))\n if cache:\n z.extractall(os.path.join(cur_path, 
'data'))\n month_data = pd.read_csv(z.open('2017%s-citibike-tripdata.csv' % (month)))\n return month_data\n\ndef trip_insert(cnx, month_data, batch_size):\n month_insert = month_data[['start time', 'stop time', 'trip_time', 'trip duration', 'start_station', 'end_station',\n 'bike id', 'usertype', 'gender', 'birth year']].fillna('NULL').values\n base = 'INSERT INTO trips (start, end, avg_trip, duration, start_station, end_station, bike_id, user, gender, birth_year) VALUES '\n running_insert = 0\n \n while running_insert != len(month_insert):\n insert_vals = []\n while len(insert_vals) < batch_size and running_insert != len(month_insert):\n insert_vals.append('(\"%s\", \"%s\", %s, %s, %s, %s, %s, %s, %s, %s)' %\n (*month_insert[running_insert],))\n running_insert += 1\n full_insert = base+','.join(insert_vals)+';'\n cnx.cursor().execute('SET foreign_key_checks = 0;')\n cnx.cursor().execute(full_insert)\n cnx.commit()\n cnx.cursor().execute('SET foreign_key_checks = 1;')\n\ndef station_insert(cnx, station_dict):\n base = 'INSERT INTO stations VALUES '\n insert_vals = []\n for (name, data) in station_dict.items():\n insert_vals.append('(%s, \"%s\", %s, %s)' % (data['id'], name, data['latitude'], data['longitude']))\n full_insert = base+','.join(insert_vals)+';'\n cnx.cursor().execute('SET foreign_key_checks = 0;')\n cnx.cursor().execute(full_insert)\n cnx.commit()\n cnx.cursor().execute('SET foreign_key_checks = 1;') \n\ndef conv_time(time_, month_, day_):\n _time = []\n for time in time_:\n hour = int(time.split(' ')[0].split(':')[0])\n minute = int(time.split(' ')[0].split(':')[1])\n tod = time.split(' ')[1]\n\n hour, minute = str(hour), str(minute)\n if len(hour) == 1:\n hour = '0'+hour\n if len(minute) == 1:\n minute = '0'+minute\n timedate = '2017-%s-%s %s:%s:00' % (month_, day_, hour, minute)\n if tod == 'PM':\n timedate = str(datetime.datetime.strptime(timedate, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours = 12))\n _time.append('\"2017-%s-%s %s:%s:00\"' % (month_, day_, hour, minute))\n return _time\n \ndef weather_insert(cnx, weather_data):\n weather_data = weather_data.fillna('NULL').values\n base = 'INSERT INTO weather VALUES '\n \n insert_vals = []\n for running_insert in range(len(weather_data)):\n insert_vals.append('(%s, %s, %s, %s, %s, %s, %s)' %\n (*weather_data[running_insert],))\n full_insert = base+','.join(insert_vals)+';'\n cnx.cursor().execute('SET foreign_key_checks = 0;')\n cnx.cursor().execute(full_insert)\n cnx.commit()\n cnx.cursor().execute('SET foreign_key_checks = 1;')\n\ndef pull_weather(cnx):\n print('Beginning weather data retrieval and storage...')\n\n con = cnx.cursor()\n con.execute(\"Select Max(time_id) from weather;\")\n latest = con.fetchall()\n \n cur_date = datetime.datetime(latest[0][0].year, latest[0][0].month, latest[0][0].day + 1)\n\n# cur_date = datetime.datetime(2017, 1, 1) \n while cur_date.year == 2017:\n print('Weather Date: %s' % (cur_date.date()))\n month = cur_date.month\n day = cur_date.day\n url = 'https://www.wunderground.com/history/airport/KNYC/2017/%s/%s/DailyHistory.html?&reqdb.zip=&reqdb.magic=&reqdb.wmo=' % (month, day)\n \n page = requests.get(url)\n tree = html.fromstring(page.content)\n tree.xpath('//meta[@name=\"description\"]/text()')\n while str(page.content)[str(page.content).find('<title>') + 7 : str(page.content).find('</title>')] == \"Oops! There\\\\'s been an error. | Weather Underground\":\n print('Error loading page. 
Reloading...')\n page = requests.get(url)\n tree = html.fromstring(page.content)\n tree.xpath('//meta[@name=\"description\"]/text()')\n \n month, day = str(month), str(day)\n if len(month) == 1:\n month = '0'+month \n if len(day) == 1:\n day = '0'+day \n \n headers = tree.xpath('//div[@id=\"observations_details\"]/table/thead/tr/th/text()')\n headers = ['fill'] + headers\n headers = [i.strip().lower().replace('.','').replace(',', '') for i in headers]\n if 'time' in headers:\n time = tree.xpath('//tr[@class=\"no-metars\"]/td[%i]/text()' % (headers.index('time')))\n elif 'time (est)' in headers:\n time = tree.xpath('//tr[@class=\"no-metars\"]/td[%i]/text()' % (headers.index('time (est)'))) \n time = conv_time(time, month, day)\n temp_val = tree.xpath('//tr[@class=\"no-metars\"]/td[%i]/span/span[1]/text()' % (headers.index('temp')))\n temp = tree.xpath('//tr[@class=\"no-metars\"]/td[%i]/text()' % (headers.index('temp')))\n temp = [i.replace('\\n','') for i in temp]\n temp = [i for i in temp if i != '']\n temp = [i.replace(' ','') for i in temp]\n for i in range(len(temp)):\n if temp[i] == '':\n temp[i] = temp_val[0]\n temp_val = temp_val[1:]\n else:\n temp[i] = 'NULL'\n \n hum = tree.xpath('//tr[@class=\"no-metars\"]/td[%s]/text()' % (headers.index('humidity')))\n hum = [i.replace('%', '') for i in hum]\n hum = ['NULL' if i == 'N/A' else i for i in hum]\n wind = tree.xpath('//tr[@class=\"no-metars\"]/td[%s]/span/span[1]/text()' % (headers.index('wind speed')))\n wind_str = tree.xpath('//tr[@class=\"no-metars\"]/td[%s]/text()' % (headers.index('wind speed')))\n wind_str = [i.replace('\\n','') for i in wind_str]\n wind_str = [i for i in wind_str if i != '']\n wind_str = [0 if i == 'Calm' else i for i in wind_str]\n wind_str = [str(i).replace(' ','') for i in wind_str]\n wind_str = [0 if i == '-' else i for i in wind_str]\n if len(wind) > 0:\n for i in range(len(wind_str)):\n if wind_str[i] == '':\n wind_str[i] = wind[0]\n wind = wind[1:]\n \n prec = tree.xpath('//tr[@class=\"no-metars\"]/td[%i]/text()' % (headers.index('precip')))\n prec_pres = tree.xpath('//tr[@class=\"no-metars\"]/td[%i]/span/span[1]/text()' % (headers.index('precip')))\n prec = [i for i in prec if i != '\\n']\n if len(prec_pres) > 0:\n for i in range(len(prec)):\n if prec[i] == 'N/A':\n prec[i] = '0'\n else:\n prec[i] = prec_pres[0]\n prec_pres = prec_pres[1:]\n prec = [0 if i == 'N/A' else i for i in prec] \n event = tree.xpath('//tr[@class=\"no-metars\"]/td[%i]/text()' % (headers.index('events')))\n event = [i.replace('\\n', '') for i in event]\n event = ['\"None\"' if i == '\\t\\xa0' else '\"'+str(i)+'\"' for i in event]\n cond = tree.xpath('//tr[@class=\"no-metars\"]/td[%i]/text()' % (headers.index('conditions')))\n cond = ['\"'+str(i)+'\"' for i in cond]\n \n weatherdata = pd.DataFrame()\n weatherdata['time_id'] = time\n weatherdata['temperature'] = temp\n weatherdata['humidity'] = hum\n weatherdata['wind'] = wind_str\n weatherdata['precip'] = prec\n weatherdata['label'] = event\n weatherdata['condition'] = cond\n \n weatherdata.drop_duplicates(subset = 'time_id', inplace = True)\n weather_insert(cnx, weatherdata)\n cur_date = cur_date + datetime.timedelta(hours = 24)\n \ndef store_data(cnx, batch_size = 10000):\n con = cnx.cursor()\n con.execute(\"Select Max(trip_id) from trips;\")\n full_check = con.fetchall()\n if full_check[0][0] == 16364657:\n return\n \n station_dict = {}\n \n running_id = 0\n for month in range(1,13): \n month_data = get_data(month)\n print('Processing data for %s/2017' % (month))\n 
month_data.columns = [x.lower() for x in month_data.columns]\n for colname in ['trip duration', 'start time', 'stop time', 'user type', 'bike id']:\n if ''.join(colname.split(' ')) in list(month_data):\n month_data.rename(columns = {''.join(colname.split(' ')): colname}, inplace = True)\n for name, lat, lon in month_data[['start station name', 'start station latitude', 'start station longitude']].values:\n if name not in station_dict.keys():\n station_dict[name] = {'id': running_id, 'latitude': lat, 'longitude': lon}\n running_id += 1\n for name, lat, lon in month_data[['end station name', 'end station latitude', 'end station longitude']].values:\n if name not in station_dict.keys():\n station_dict[name] = {'id': running_id, 'latitude': lat, 'longitude': lon}\n running_id += 1\n month_data['start_station'] = month_data['start station name'].apply(lambda x: station_dict[x]['id'])\n month_data['end_station'] = month_data['end station name'].apply(lambda x: station_dict[x]['id'])\n month_data['usertype'] = month_data['user type'].apply(lambda x: 0 if x == 'Customer' else 1)\n \n stop = month_data['stop time'].apply(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d %H:%M:%S\"))\n start = month_data['start time'].apply(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d %H:%M:%S\"))\n avg_trip = [j + (i - j) for i,j in zip(stop, start)]\n avg_trip = ['\"'+str(i)+'\"' for i in avg_trip]\n month_data['trip_time'] = avg_trip\n print('Storing data for %s/2017' % (month))\n trip_insert(cnx, month_data, batch_size)\n station_insert(cnx, station_dict)\n\nif __name__ == '__main__':\n cnx = mysql.connector.connect(user='root', password = 'ibm1234')\n set_mysql_env(cnx)\n cnx.close()\n cnx = mysql.connector.connect(user='root', password='ibm1234',\n host='127.0.0.1',\n database=DB_NAME)\n store_data(cnx = cnx)\n pull_weather(cnx) \n cnx.close()\n" ]
[ [ "pandas.DataFrame" ] ]
Helw150/DiffPruningTransformers
[ "4af8fc6493e8fc71d523f4d4a58f3b9115a97db1" ]
[ "diffpruningtransformers/diff_prune_transformer.py" ]
[ "import math\nimport torch\nfrom torchreparam import ReparamModule\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\ndef hard_sigmoid(x):\n return x.clamp(0, 1000).clamp(-1000, 1)\n\n\nclass L0Norm(nn.Module):\n def __init__(self, origin_shape, alpha_init, l, r):\n \"\"\"\n Base class of layers using L0 Norm\n :param origin: original layer such as nn.Linear(..), nn.Conv2d(..)\n :param loc_mean: mean of the normal distribution which generates initial location parameters\n :param loc_sdev: standard deviation of the normal distribution which generates initial location parameters\n :param beta: initial temperature parameter\n :param gamma: lower bound of \"stretched\" s\n :param zeta: upper bound of \"stretched\" s\n :param fix_temp: True if temperature is fixed\n \"\"\"\n super(L0Norm, self).__init__()\n self._size = origin_shape\n self.alpha = nn.Parameter(torch.zeros(self._size) + alpha_init)\n self.register_buffer(\"uniform\", torch.zeros(self._size))\n self.l = l\n self.r = r\n self.lower_upper_ratio = math.log(-self.l / self.r)\n\n def _get_mask(self):\n if self.training:\n self.uniform.uniform_()\n s = torch.autograd.Variable(self.uniform).clamp_(0.0001, 0.9999)\n s = F.sigmoid(torch.log(s) - torch.log(1 - s) + self.alpha)\n u = s * (self.r - self.l) + self.l\n penalty = F.sigmoid(self.alpha - self.lower_upper_ratio).sum()\n else:\n s = F.sigmoid(self.alpha) * (self.r - self.l) + self.l\n penalty = torch.tensor(0)\n return hard_sigmoid(s), penalty\n\n\nclass DiffPruningTransformer(torch.nn.Module):\n def __init__(self, parent_model, device, alpha_init=5, l=-1.5, r=1.5):\n super(DiffPruningTransformer, self).__init__()\n self.lm = ReparamModule(parent_model.base_model)\n self.add_module(\"l0_norm\", L0Norm(self.lm.flat_param.shape, alpha_init, l, r))\n self.patch_weight = torch.zeros_like(self.lm.flat_param)\n self.patch_weight.requires_grad = True\n self.patch_weight = torch.nn.Parameter(self.patch_weight)\n self.device = device\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n mask, penalty = self.l0_norm._get_mask()\n penalty = penalty\n patch = mask * self.patch_weight\n flat_params = self.lm.flat_param.detach() + patch\n flat_params = flat_params\n outputs = self.lm(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n flat_param=flat_params,\n )\n sequence_output = outputs[0]\n outputs = outputs[:1] + (penalty,) + outputs[1:]\n return outputs\n\n def save_pretrained(self, path):\n torch.save(self.state_dict(), path)\n\n def load_pretrained(self, path):\n model.load_state_dict(torch.load(path))\n" ]
[ [ "torch.nn.Parameter", "torch.load", "torch.zeros", "torch.zeros_like", "torch.tensor", "torch.nn.functional.sigmoid", "torch.log", "torch.autograd.Variable" ] ]
nekomaruh/Rendimiento_Escolar_en_Chile_2010-2019
[ "de4a12b2d35ffd76902e6f1bdd552a7ae500548a" ]
[ "interface.py" ]
[ "import queries as q\nfrom queries import cursor, connection, ps\nimport static_tables as st\nimport os, psutil\nimport pandas as pd\nimport fnmatch\nimport time\nfrom io import StringIO\nfrom tqdm import tqdm\n\ndef get_ram(info='unknown'):\n # Obtiene la información de la ram utilizada durante la ejecución del programa\n process = psutil.Process(os.getpid())\n print('RAM:', process.memory_info()[0]/800000, 'mb ('+info+')')\n\ndef drop_dimensions():\n # Elimina todas las tablas que existen en la base de datos\n q.drop_tables()\n get_ram(info='Dropped all tables')\n\ndef create_dimensions():\n # Crea las tablas estáticas\n q.create_static_tables()\n q.create_tables()\n get_ram(info='Created all tables')\n\ndef insert_static_dimensions():\n # Inserta los valores a las tablas estáticas\n q.insert_dim_depe(st.data_depe)\n q.insert_dim_region(st.data_region)\n q.insert_dim_provincia(st.data_provincia)\n q.insert_dim_rural(st.data_rural_rbd)\n q.insert_dim_ense(st.data_ense)\n q.insert_dim_grado(st.data_grado)\n q.insert_dim_genero(st.data_genero)\n q.insert_dim_sit_fin(st.data_sit_fin)\n q.insert_dim_jornada(st.data_jornada)\n q.insert_dim_int_alu(st.data_int_alu)\n q.insert_dim_sec(st.data_sec)\n q.insert_dim_espe(st.data_espe)\n q.insert_dim_ense2(st.data_ense2)\n get_ram(info='Static tables inserted')\n\ndef df_to_html(dataframe, year, num_rows):\n # Nota: No se asegura que la cantidad de columnas a exportar sea mayor al dataframe\n # Esta función solo la utilizamos para testing\n header = dataframe.head(num_rows)\n header.to_html(\"datasets_headers_pdf/df_\"+year+\".html\")\n get_ram(info='HTML exported '+str(year))\n\ndef df_to_sql(table_name, engine, data, headers, remove_duplicates):\n # Sube los datos del dataframe a la base de datos\n get_ram(info='Creating dataframe \"'+table_name+'\"')\n df = pd.concat(data, axis=1, keys=headers)\n print(df.head())\n print(df.dtypes)\n get_ram(info='Dropping duplicates from \"'+table_name+'\"')\n df.drop_duplicates(subset=remove_duplicates, keep=\"first\", inplace=True)\n df.reset_index(drop=True, inplace=True)\n get_ram(info='Uploading to database table \"'+table_name+'\"...')\n df.to_sql(table_name,engine, method='multi', if_exists='append',index=False, chunksize=100000)\n get_ram(info='Data \"'+table_name+'\" inserted to database')\n\ndef copy_from_stringio(table_name, data, headers, remove_duplicates):\n get_ram(info='Creating dataframe \"'+table_name+'\"')\n df = pd.concat(data, axis=1, keys=headers)\n print(df.head())\n print(df.dtypes)\n get_ram(info='Dropping duplicates from \"'+table_name+'\"')\n df.drop_duplicates(subset=remove_duplicates, keep=\"first\", inplace=True)\n df.reset_index(drop=True, inplace=True)\n \n\n get_ram(info='Exporting csv \"'+table_name+'\"...')\n \n # Save the dataframe to disk\n tmp_df = \"./tmp_dataframe.csv\"\n df.to_csv(tmp_df, index_label='id', header=False)\n f = open(tmp_df, 'r')\n get_ram(info='Uploading to database table \"'+table_name+'\"...')\n try:\n cursor.copy_from(f, table_name, sep=\";\")\n connection.commit()\n except (Exception, ps.DatabaseError) as error:\n os.remove(tmp_df)\n print(\"Error: %s\" % error)\n connection.rollback()\n return 1\n print(\"copy_from_file() done\")\n os.remove(tmp_df)\n get_ram(info='Data \"'+table_name+'\" inserted to database')\n\n\n\n\n \n\ndef get_amount_of_csv():\n # Obtiene la cantidad de archivos .csv\n return len(fnmatch.filter(os.listdir('datasets/'), '*.csv'))\n\ndef get_time(start_time):\n end_time = time.time()\n difference = round(end_time-start_time,4)\n 
return print('Time: '+str(difference)+' seconds')\n\ndef insert_dim_comuna(list):\n get_ram(info='Uploading to database table \"comuna\"...')\n q.insert_dim_com(list)\n get_ram(info='Data \"comuna\" inserted to database')\n\ndef get_columns_to_drop():\n return ['FEC_ING_ALU',\n 'NOM_REG_RBD_A', \n 'COD_DEPROV_RBD', \n 'NOM_DEPROV_RBD', \n 'GD_ALU',\n 'COD_DEPE2',\n 'ESTADO_ESTAB', \n 'COD_TIP_CUR', \n 'COD_DES_CUR', \n 'COD_REG_ALU', \n 'COD_RAMA', \n 'COD_MEN', \n 'SIT_FIN_R',\n 'SIT_FINAL_R',\n 'EDAD_ALU']" ]
[ [ "pandas.concat" ] ]
jiapei100/DLTK
[ "50b31c9fd75679acc154d2ce384af2842303cc3c" ]
[ "dltk/io/augmentation.py" ]
[ "from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport numpy as np\nfrom scipy.ndimage.interpolation import map_coordinates\nfrom scipy.ndimage.filters import gaussian_filter\n\n\ndef flip(imagelist, axis=1):\n \"\"\"Randomly flip spatial dimensions\n\n Args:\n imagelist (np.ndarray or list or tuple): image(s) to be flipped\n axis (int): axis along which to flip the images\n\n Returns:\n np.ndarray or list or tuple: same as imagelist but randomly flipped\n along axis\n \"\"\"\n\n # Check if a single image or a list of images has been passed\n was_singular = False\n if isinstance(imagelist, np.ndarray):\n imagelist = [imagelist]\n was_singular = True\n\n # With a probility of 0.5 flip the image(s) across `axis`\n do_flip = np.random.random(1)\n if do_flip > 0.5:\n for i in range(len(imagelist)):\n imagelist[i] = np.flip(imagelist[i], axis=axis)\n if was_singular:\n return imagelist[0]\n return imagelist\n\n\ndef add_gaussian_offset(image, sigma=0.1):\n \"\"\"\n Add Gaussian offset to an image. Adds the offset to each channel\n independently.\n\n Args:\n image (np.ndarray): image to add noise to\n sigma (float): stddev of the Gaussian distribution to generate noise\n from\n\n Returns:\n np.ndarray: same as image but with added offset to each channel\n \"\"\"\n\n offsets = np.random.normal(0, sigma, ([1] * (image.ndim - 1) + [image.shape[-1]]))\n image += offsets\n return image\n\n\ndef add_gaussian_noise(image, sigma=0.05):\n \"\"\"\n Add Gaussian noise to an image\n\n Args:\n image (np.ndarray): image to add noise to\n sigma (float): stddev of the Gaussian distribution to generate noise\n from\n\n Returns:\n np.ndarray: same as image but with added offset to each channel\n \"\"\"\n\n image += np.random.normal(0, sigma, image.shape)\n return image\n\n\ndef elastic_transform(image, alpha, sigma):\n \"\"\"\n Elastic deformation of images as described in [1].\n\n [1] Simard, Steinkraus and Platt, \"Best Practices for Convolutional\n Neural Networks applied to Visual Document Analysis\", in Proc. 
of the\n International Conference on Document Analysis and Recognition, 2003.\n\n Based on gist https://gist.github.com/erniejunior/601cdf56d2b424757de5\n\n Args:\n image (np.ndarray): image to be deformed\n alpha (list): scale of transformation for each dimension, where larger\n values have more deformation\n sigma (list): Gaussian window of deformation for each dimension, where\n smaller values have more localised deformation\n\n Returns:\n np.ndarray: deformed image\n \"\"\"\n\n assert len(alpha) == len(sigma), \\\n \"Dimensions of alpha and sigma are different\"\n\n channelbool = image.ndim - len(alpha)\n out = np.zeros((len(alpha) + channelbool, ) + image.shape)\n\n # Generate a Gaussian filter, leaving channel dimensions zeroes\n for jj in range(len(alpha)):\n array = (np.random.rand(*image.shape) * 2 - 1)\n out[jj] = gaussian_filter(array, sigma[jj],\n mode=\"constant\", cval=0) * alpha[jj]\n\n # Map mask to indices\n shapes = list(map(lambda x: slice(0, x, None), image.shape))\n grid = np.broadcast_arrays(*np.ogrid[shapes])\n indices = list(map((lambda x: np.reshape(x, (-1, 1))), grid + np.array(out)))\n\n # Transform image based on masked indices\n transformed_image = map_coordinates(image, indices, order=0,\n mode='reflect').reshape(image.shape)\n\n return transformed_image\n\n\ndef extract_class_balanced_example_array(image,\n label,\n example_size=[1, 64, 64],\n n_examples=1,\n classes=2,\n class_weights=None):\n \"\"\"Extract training examples from an image (and corresponding label) subject\n to class balancing. Returns an image example array and the\n corresponding label array.\n\n Args:\n image (np.ndarray): image to extract class-balanced patches from\n label (np.ndarray): labels to use for balancing the classes\n example_size (list or tuple): shape of the patches to extract\n n_examples (int): number of patches to extract in total\n classes (int or list or tuple): number of classes or list of classes\n to extract\n\n Returns:\n np.ndarray, np.ndarray: class-balanced patches extracted from full\n images with the shape [batch, example_size..., image_channels]\n \"\"\"\n assert image.shape[:-1] == label.shape, 'Image and label shape must match'\n assert image.ndim - 1 == len(example_size), \\\n 'Example size doesnt fit image size'\n assert all([i_s >= e_s for i_s, e_s in zip(image.shape, example_size)]), \\\n 'Image must be larger than example shape'\n rank = len(example_size)\n\n if isinstance(classes, int):\n classes = tuple(range(classes))\n n_classes = len(classes)\n\n assert n_examples >= n_classes, \\\n 'n_examples need to be greater than n_classes'\n\n if class_weights is None:\n n_ex_per_class = np.ones(n_classes).astype(int) * int(np.round(n_examples / n_classes))\n else:\n assert len(class_weights) == n_classes, \\\n 'Class_weights must match number of classes'\n class_weights = np.array(class_weights)\n n_ex_per_class = np.round((class_weights / class_weights.sum()) * n_examples).astype(int)\n\n # Compute an example radius to define the region to extract around a\n # center location\n ex_rad = np.array(list(zip(np.floor(np.array(example_size) / 2.0),\n np.ceil(np.array(example_size) / 2.0))),\n dtype=np.int)\n\n class_ex_images = []\n class_ex_lbls = []\n min_ratio = 1.\n for c_idx, c in enumerate(classes):\n # Get valid, random center locations belonging to that class\n idx = np.argwhere(label == c)\n\n ex_images = []\n ex_lbls = []\n\n if len(idx) == 0 or n_ex_per_class[c_idx] == 0:\n class_ex_images.append([])\n class_ex_lbls.append([])\n continue\n\n # Extract 
random locations\n r_idx_idx = np.random.choice(len(idx),\n size=min(n_ex_per_class[c_idx], len(idx)),\n replace=False).astype(int)\n r_idx = idx[r_idx_idx]\n\n # Shift the random to valid locations if necessary\n r_idx = np.array(\n [np.array([max(min(r[dim], image.shape[dim] - ex_rad[dim][1]),\n ex_rad[dim][0]) for dim in range(rank)])\n for r in r_idx])\n\n for i in range(len(r_idx)):\n # Extract class-balanced examples from the original image\n slicer = [slice(r_idx[i][dim] -\n ex_rad[dim][0], r_idx[i][dim] +\n ex_rad[dim][1]) for dim in range(rank)]\n\n ex_image = image[slicer][np.newaxis, :]\n\n ex_lbl = label[slicer][np.newaxis, :]\n\n # Concatenate them and return the examples\n ex_images = np.concatenate((ex_images, ex_image), axis=0) \\\n if (len(ex_images) != 0) else ex_image\n ex_lbls = np.concatenate((ex_lbls, ex_lbl), axis=0) \\\n if (len(ex_lbls) != 0) else ex_lbl\n\n class_ex_images.append(ex_images)\n class_ex_lbls.append(ex_lbls)\n\n ratio = n_ex_per_class[c_idx] / len(ex_images)\n min_ratio = ratio if ratio < min_ratio else min_ratio\n\n indices = np.floor(n_ex_per_class * min_ratio).astype(int)\n\n ex_images = np.concatenate([cimage[:idxs] for cimage, idxs in zip(class_ex_images, indices)\n if len(cimage) > 0], axis=0)\n ex_lbls = np.concatenate([clbl[:idxs] for clbl, idxs in zip(class_ex_lbls, indices)\n if len(clbl) > 0], axis=0)\n\n return ex_images, ex_lbls\n\n\ndef extract_random_example_array(image_list,\n example_size=[1, 64, 64],\n n_examples=1):\n \"\"\"Randomly extract training examples from image (and a corresponding label).\n Returns an image example array and the corresponding label array.\n\n Args:\n image_list (np.ndarray or list or tuple): image(s) to extract random\n patches from\n example_size (list or tuple): shape of the patches to extract\n n_examples (int): number of patches to extract in total\n\n Returns:\n np.ndarray, np.ndarray: class-balanced patches extracted from full\n images with the shape [batch, example_size..., image_channels]\n \"\"\"\n\n assert n_examples > 0\n\n was_singular = False\n if isinstance(image_list, np.ndarray):\n image_list = [image_list]\n was_singular = True\n\n assert all([i_s >= e_s for i_s, e_s in zip(image_list[0].shape, example_size)]), \\\n 'Image must be bigger than example shape'\n assert (image_list[0].ndim - 1 == len(example_size) or\n image_list[0].ndim == len(example_size)), \\\n 'Example size doesnt fit image size'\n\n for i in image_list:\n if len(image_list) > 1:\n assert (i.ndim - 1 == image_list[0].ndim or\n i.ndim == image_list[0].ndim or\n i.ndim + 1 == image_list[0].ndim), \\\n 'Example size doesnt fit image size'\n\n assert all([i0_s == i_s for i0_s, i_s in zip(image_list[0].shape, i.shape)]), \\\n 'Image shapes must match'\n\n rank = len(example_size)\n\n # Extract random examples from image and label\n valid_loc_range = [image_list[0].shape[i] - example_size[i] for i in range(rank)]\n\n rnd_loc = [np.random.randint(valid_loc_range[dim], size=n_examples)\n if valid_loc_range[dim] > 0\n else np.zeros(n_examples, dtype=int) for dim in range(rank)]\n\n examples = [[]] * len(image_list)\n for i in range(n_examples):\n slicer = [slice(rnd_loc[dim][i], rnd_loc[dim][i] + example_size[dim])\n for dim in range(rank)]\n\n for j in range(len(image_list)):\n ex_image = image_list[j][slicer][np.newaxis]\n # Concatenate and return the examples\n examples[j] = np.concatenate((examples[j], ex_image), axis=0) \\\n if (len(examples[j]) != 0) else ex_image\n\n if was_singular:\n return examples[0]\n return 
examples\n" ]
[ [ "numpy.random.random", "numpy.reshape", "numpy.argwhere", "scipy.ndimage.interpolation.map_coordinates", "numpy.round", "numpy.random.normal", "numpy.concatenate", "numpy.ones", "scipy.ndimage.filters.gaussian_filter", "numpy.floor", "numpy.broadcast_arrays", "numpy.random.rand", "numpy.array", "numpy.flip", "numpy.zeros", "numpy.random.randint" ] ]
Hiteshsaai/BikeShare_DataAnalysis
[ "544f5176835882138ca8ae7ec460a9d405d2e30f" ]
[ "Bike_Share_Analysis.py" ]
[ "\n# coding: utf-8\n\n# # 2016 US Bike Share Activity Snapshot\n# \n# ## Table of Contents\n# - [Introduction](#intro)\n# - [Posing Questions](#pose_questions)\n# - [Data Collection and Wrangling](#wrangling)\n# - [Condensing the Trip Data](#condensing)\n# - [Exploratory Data Analysis](#eda)\n# - [Statistics](#statistics)\n# - [Visualizations](#visualizations)\n# - [Performing Your Own Analysis](#eda_continued)\n# - [Conclusions](#conclusions)\n# \n# <a id='intro'></a>\n# ## Introduction\n# \n# > **Tip**: Quoted sections like this will provide helpful instructions on how to navigate and use a Jupyter notebook.\n# \n# Over the past decade, bicycle-sharing systems have been growing in number and popularity in cities across the world. Bicycle-sharing systems allow users to rent bicycles for short trips, typically 30 minutes or less. Thanks to the rise in information technologies, it is easy for a user of the system to access a dock within the system to unlock or return bicycles. These technologies also provide a wealth of data that can be used to explore how these bike-sharing systems are used.\n# \n# In this project, you will perform an exploratory analysis on data provided by [Motivate](https://www.motivateco.com/), a bike-share system provider for many major cities in the United States. You will compare the system usage between three large cities: New York City, Chicago, and Washington, DC. You will also see if there are any differences within each system for those users that are registered, regular users and those users that are short-term, casual users.\n\n# <a id='pose_questions'></a>\n# ## Posing Questions\n# \n# Before looking at the bike sharing data, you should start by asking questions you might want to understand about the bike share data. Consider, for example, if you were working for Motivate. What kinds of information would you want to know about in order to make smarter business decisions? If you were a user of the bike-share service, what factors might influence how you would want to use the service?\n# \n# **Question 1**: Write at least two questions related to bike sharing that you think could be answered by data.\n# \n# **Answer**: To inform business decisions I would consider:\n# - useage disribution as a function of:\n# - time of day\n# - day of year\n# - season\n# - weather patterns\n# - Customer demographics\n# - Customer segments\n# - Whether or not any service point is running out of bikes and when (time of day, day of year)\n# \n# The main general questions come down to the classic :\n# \n# - Who uses the bike share?\n# - How do they use it?\n# - Why do they use it?\n# - When do they use it?\n# - Where do they use it (where do they pick up and return)?\n# \n# Finally, some specific Questions like,\n# \n# - What is the most common day of useage for subsribers, and customers?\n# - What is the most common time of useage for subscribers and customers?\n# - What is the most common trip duration?\n# \n# > **Tip**: If you double click on this cell, you will see the text change so that all of the formatting is removed. This allows you to edit this block of text. This block of text is written using [Markdown](http://daringfireball.net/projects/markdown/syntax), which is a way to format text using headers, links, italics, and many other options using a plain-text syntax. You will also use Markdown later in the Nanodegree program. 
Use **Shift** + **Enter** or **Shift** + **Return** to run the cell and show its rendered form.\n\n# <a id='wrangling'></a>\n# ## Data Collection and Wrangling\n# \n# Now it's time to collect and explore our data. In this project, we will focus on the record of individual trips taken in 2016 from our selected cities: New York City, Chicago, and Washington, DC. Each of these cities has a page where we can freely download the trip data.:\n# \n# - New York City (Citi Bike): [Link](https://www.citibikenyc.com/system-data)\n# - Chicago (Divvy): [Link](https://www.divvybikes.com/system-data)\n# - Washington, DC (Capital Bikeshare): [Link](https://www.capitalbikeshare.com/system-data)\n# \n# If you visit these pages, you will notice that each city has a different way of delivering its data. Chicago updates with new data twice a year, Washington DC is quarterly, and New York City is monthly. **However, you do not need to download the data yourself.** The data has already been collected for you in the `/data/` folder of the project files. While the original data for 2016 is spread among multiple files for each city, the files in the `/data/` folder collect all of the trip data for the year into one file per city. Some data wrangling of inconsistencies in timestamp format within each city has already been performed for you. In addition, a random 2% sample of the original data is taken to make the exploration more manageable. \n# \n# **Question 2**: However, there is still a lot of data for us to investigate, so it's a good idea to start off by looking at one entry from each of the cities we're going to analyze. Run the first code cell below to load some packages and functions that you'll be using in your analysis. Then, complete the second code cell to print out the first trip recorded from each of the cities (the second line of each data file).\n# \n# > **Tip**: You can run a code cell like you formatted Markdown cells above by clicking on the cell and using the keyboard shortcut **Shift** + **Enter** or **Shift** + **Return**. Alternatively, a code cell can be executed using the **Play** button in the toolbar after selecting it. While the cell is running, you will see an asterisk in the message to the left of the cell, i.e. `In [*]:`. The asterisk will change into a number to show that execution has completed, e.g. `In [1]`. If there is output, it will show up as `Out [1]:`, with an appropriate number to match the \"In\" number.\n\n# In[23]:\n\n\n## import all necessary packages and functions.\nimport csv # read and write csv files\nfrom datetime import datetime # operations to parse dates\nfrom datetime import time \nfrom datetime import date\nimport pprint # use to print data structures like dictionaries in\n # a nicer way than the base print function.\n\n\n# In[24]:\n\n\ndef print_first_point(filename):\n \"\"\"\n This function prints and returns the first data point (second row) from\n a csv file that includes a header row.\n \"\"\"\n # print city name for reference\n city = filename.split('-')[0].split('/')[-1]\n print('\\nCity: {}'.format(city))\n \n with open(filename, 'r') as f_in:\n ## TODO: Use the csv library to set up a DictReader object. ##\n ## see https://docs.python.org/3/library/csv.html ##\n trip_reader = csv.DictReader(f_in)\n \n ## TODO: Use a function on the DictReader object to read the ##\n ## first trip from the data file and store it in a variable. 
##\n ## see https://docs.python.org/3/library/csv.html#reader-objects##\n first_trip = trip_reader.__next__()\n \n ## TODO: Use the pprint library to print the first trip. ##\n ## see https://docs.python.org/3/library/pprint.html ##\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(first_trip)\n # output city name and first trip for later testing\n return (city, first_trip)\n\n# list of files for each city\ndata_files = ['./data/NYC-CitiBike-2016.csv',\n './data/Chicago-Divvy-2016.csv',\n './data/Washington-CapitalBikeshare-2016.csv',]\n\n# print the first trip from each file, store in dictionary\nexample_trips = {}\nfor data_file in data_files:\n city, first_trip = print_first_point(data_file)\n example_trips[city] = first_trip\n\n\n# If everything has been filled out correctly, you should see below the printout of each city name (which has been parsed from the data file name) that the first trip has been parsed in the form of a dictionary. When you set up a `DictReader` object, the first row of the data file is normally interpreted as column names. Every other row in the data file will use those column names as keys, as a dictionary is generated for each row.\n# \n# This will be useful since we can refer to quantities by an easily-understandable label instead of just a numeric index. For example, if we have a trip stored in the variable `row`, then we would rather get the trip duration from `row['duration']` instead of `row[0]`.\n# \n# <a id='condensing'></a>\n# ### Condensing the Trip Data\n# \n# It should also be observable from the above printout that each city provides different information. Even where the information is the same, the column names and formats are sometimes different. To make things as simple as possible when we get to the actual exploration, we should trim and clean the data. Cleaning the data makes sure that the data formats across the cities are consistent, while trimming focuses only on the parts of the data we are most interested in to make the exploration easier to work with.\n# \n# You will generate new data files with five values of interest for each trip: trip duration, starting month, starting hour, day of the week, and user type. Each of these may require additional wrangling depending on the city:\n# \n# - **Duration**: This has been given to us in seconds (New York, Chicago) or milliseconds (Washington). A more natural unit of analysis will be if all the trip durations are given in terms of minutes.\n# - **Month**, **Hour**, **Day of Week**: Ridership volume is likely to change based on the season, time of day, and whether it is a weekday or weekend. Use the start time of the trip to obtain these values. The New York City data includes the seconds in their timestamps, while Washington and Chicago do not. The [`datetime`](https://docs.python.org/3/library/datetime.html) package will be very useful here to make the needed conversions.\n# - **User Type**: It is possible that users who are subscribed to a bike-share system will have different patterns of use compared to users who only have temporary passes. Washington divides its users into two types: 'Registered' for users with annual, monthly, and other longer-term subscriptions, and 'Casual', for users with 24-hour, 3-day, and other short-term passes. The New York and Chicago data uses 'Subscriber' and 'Customer' for these groups, respectively. 
For consistency, you will convert the Washington labels to match the other two.\n# \n# \n# **Question 3a**: Complete the helper functions in the code cells below to address each of the cleaning tasks described above.\n\n# In[25]:\n\n\ndef duration_in_mins(datum, city):\n \"\"\"\n Takes as input a dictionary containing info about a single trip (datum) and\n its origin city (city) and returns the trip duration in units of minutes.\n \n Remember that Washington is in terms of milliseconds while Chicago and NYC\n are in terms of seconds. \n \n HINT: The csv module reads in all of the data as strings, including numeric\n values. You will need a function to convert the strings into an appropriate\n numeric type when making your transformations.\n see https://docs.python.org/3/library/functions.html\n \"\"\"\n \n # YOUR CODE HERE\n if city == 'NYC' or city == 'Chicago':\n duration = int(datum['tripduration'])/60\n elif city == 'BayArea':\n duration = float(datum['duration'])\n else:\n duration = int(datum['Duration (ms)'])/60000\n return duration\n\n\n# Some tests to check that your code works. There should be no output if all of\n# the assertions pass. The `example_trips` dictionary was obtained from when\n# you printed the first trip from each of the original data files.\ntests = {'NYC': 13.9833,\n 'Chicago': 15.4333,\n 'Washington': 7.1231}\n\nfor city in tests:\n assert abs(duration_in_mins(example_trips[city], city) - tests[city]) < .001\n\n\n# In[26]:\n\n\ndef time_of_trip(datum, city):\n \"\"\"\n Takes as input a dictionary containing info about a single trip (datum) and\n its origin city (city) and returns the month, hour, and day of the week in\n which the trip was made.\n \n Remember that NYC includes seconds, while Washington and Chicago do not.\n \n HINT: You should use the datetime module to parse the original date\n strings into a format that is useful for extracting the desired information.\n see https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior\n \"\"\"\n \n # YOUR CODE HERE\n days_dict = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}\n if city == 'NYC': \n trip_date = datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M:%S')\n month = int(trip_date.strftime('%m')[-1])\n hour = int(trip_date.strftime('%H')[-1])\n days_of_week = days_dict[datetime.weekday(datetime.date(trip_date))]\n elif city == 'Chicago':\n trip_date = datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M')\n month = int(trip_date.strftime('%m')[-1])\n hour = int(trip_date.strftime('%H'))\n days_of_week = days_dict[datetime.weekday(datetime.date(trip_date))]\n elif city == 'Washington':\n trip_date = datetime.strptime(datum['Start date'],'%m/%d/%Y %H:%M')\n month = int(trip_date.strftime('%m')[-1])\n hour = int(trip_date.strftime('%H'))\n days_of_week = days_dict[datetime.weekday(datetime.date(trip_date))]\n return ( month, hour, days_of_week )\n\n\n# Some tests to check that your code works. There should be no output if all of\n# the assertions pass. 
The `example_trips` dictionary was obtained from when\n# you printed the first trip from each of the original data files.\n#'NYC': (1, 0, 'Friday'),\ntests = {'NYC': (1, 0, 'Friday'),\n 'Chicago': (3, 23, 'Thursday'),\n 'Washington': (3, 22, 'Thursday')}\n\nfor city in tests:\n assert time_of_trip(example_trips[city], city) == tests[city]\n\n\n# In[27]:\n\n\ndef type_of_user(datum, city):\n \"\"\"\n Takes as input a dictionary containing info about a single trip (datum) and\n its origin city (city) and returns the type of system user that made the\n trip.\n \n Remember that Washington has different category names compared to Chicago\n and NYC. \n \"\"\"\n \n # YOUR CODE HERE\n if city == 'NYC'or city == 'Chicago':\n user_type = datum['usertype']\n elif city == 'BayArea':\n user_type = datum['user_type']\n else: \n user_type = datum['Member Type']\n if user_type == 'Registered':\n user_type = 'Subscriber'\n else:\n user_type = 'Customer'\n return user_type\n\n\n# Some tests to check that your code works. There should be no output if all of\n# the assertions pass. The `example_trips` dictionary was obtained from when\n# you printed the first trip from each of the original data files.\ntests = {'NYC': 'Customer',\n 'Chicago': 'Subscriber',\n 'Washington': 'Subscriber'}\n\nfor city in tests:\n assert type_of_user(example_trips[city], city) == tests[city]\n\n\n# **Question 3b**: Now, use the helper functions you wrote above to create a condensed data file for each city consisting only of the data fields indicated above. In the `/examples/` folder, you will see an example datafile from the [Bay Area Bike Share](http://www.bayareabikeshare.com/open-data) before and after conversion. Make sure that your output is formatted to be consistent with the example file.\n\n# In[28]:\n\n\ndef condense_data(in_file, out_file, city):\n \"\"\"\n This function takes full data from the specified input file\n and writes the condensed data to a specified output file. The city\n argument determines how the input file will be parsed.\n \n HINT: See the cell below to see how the arguments are structured!\n \"\"\"\n \n with open(out_file, 'w') as f_out, open(in_file, 'r') as f_in:\n # set up csv DictWriter object - writer requires column names for the\n # first row as the \"fieldnames\" argument\n out_colnames = ['duration', 'month', 'hour', 'day_of_week', 'user_type'] \n trip_writer = csv.DictWriter(f_out, fieldnames = out_colnames)\n print (trip_writer)\n trip_writer.writeheader()\n \n ## TODO: set up csv DictReader object ##\n trip_reader = csv.DictReader(f_in)\n\n # collect data from and process each row\n for row in trip_reader:\n # set up a dictionary to hold the values for the cleaned and trimmed\n # data point\n new_point = {}\n\n ## TODO: use the helper functions to get the cleaned data from ##\n ## the original data dictionaries. ##\n ## Note that the keys for the new_point dictionary should match ##\n ## the column names set in the DictWriter object above. ##\n new_point['duration'] = duration_in_mins(row, city)\n new_point['month'] = time_of_trip(row,city)[0]\n new_point['hour'] = time_of_trip(row,city)[1]\n new_point['day_of_week'] = time_of_trip(row,city)[2]\n new_point['user_type'] = type_of_user(row, city)\n \n \n ## TODO: write the processed information to the output file. 
##\n ## see https://docs.python.org/3/library/csv.html#writer-objects ##\n trip_writer.writerow(new_point)\n\n\n# In[29]:\n\n\n# Run this cell to check your work\ncity_info = {'Washington': {'in_file': './data/Washington-CapitalBikeshare-2016.csv',\n 'out_file': './data/Washington-2016-Summary.csv'},\n 'Chicago': {'in_file': './data/Chicago-Divvy-2016.csv',\n 'out_file': './data/Chicago-2016-Summary.csv'},\n 'NYC': {'in_file': './data/NYC-CitiBike-2016.csv',\n 'out_file': './data/NYC-2016-Summary.csv'}}\n\nfor city, filenames in city_info.items():\n condense_data(filenames['in_file'], filenames['out_file'], city)\n print_first_point(filenames['out_file'])\n\n\n# > **Tip**: If you save a jupyter Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the necessary code blocks from your previous session to reestablish variables and functions before picking up where you last left off.\n# \n# <a id='eda'></a>\n# ## Exploratory Data Analysis\n# \n# Now that you have the data collected and wrangled, you're ready to start exploring the data. In this section you will write some code to compute descriptive statistics from the data. You will also be introduced to the `matplotlib` library to create some basic histograms of the data.\n# \n# <a id='statistics'></a>\n# ### Statistics\n# \n# First, let's compute some basic counts. The first cell below contains a function that uses the csv module to iterate through a provided data file, returning the number of trips made by subscribers and customers. The second cell runs this function on the example Bay Area data in the `/examples/` folder. Modify the cells to answer the question below.\n# \n# **Question 4a**: Which city has the highest number of trips? Which city has the highest proportion of trips made by subscribers? Which city has the highest proportion of trips made by short-term customers?\n# \n# **Answer**: NYC has the Highest Number Of **Trips**, Highest Number Of **Subscribers** and Highest Number of **Short-term Customers**.\n\n# In[30]:\n\n\ndef number_of_trips(filename):\n \"\"\"\n This function reads in a file with trip data and reports the number of\n trips made by subscribers, customers, and total overall.\n \"\"\"\n city = filename.split('-')[0].split('/')[-1]\n with open(filename, 'r') as f_in:\n # set up csv reader object\n reader = csv.DictReader(f_in)\n \n # initialize count variables\n n_subscribers = 0\n n_customers = 0\n \n # tally up ride types\n for row in reader:\n if city == 'NYC' or city == 'Chicago':\n if row['usertype'] == 'Subscriber':\n n_subscribers += 1\n else:\n n_customers += 1\n else:\n if row['Member Type'] == 'Registered':\n n_subscribers += 1\n else:\n n_customers += 1\n # compute total number of rides\n n_trips = n_subscribers + n_customers \n # return tallies as a tuple\n return city, n_trips, n_subscribers, n_customers\n\n\n# In[31]:\n\n\n## Modify this and the previous cell to answer Question 4a. Remember to run ##\n## the function on the cleaned data files you created from Question 3. 
##\n\ndata_file = ['./data/NYC-CitiBike-2016.csv',\n './data/Chicago-Divvy-2016.csv',\n './data/Washington-CapitalBikeshare-2016.csv']\noutput =[]\nfor file in data_file:\n data = number_of_trips(file)\n output.append(data)\nfor item in output:\n print (item[0],\":\",item[1],\"=>'TotalTrips' \",item[2],\"=>'TotalSubscriber' \",item[3],\"=>'TotalCustomer'\")\n\n\n# > **Tip**: In order to add additional cells to a notebook, you can use the \"Insert Cell Above\" and \"Insert Cell Below\" options from the menu bar above. There is also an icon in the toolbar for adding new cells, with additional icons for moving the cells up and down the document. By default, new cells are of the code type; you can also specify the cell type (e.g. Code or Markdown) of selected cells from the Cell menu or the dropdown in the toolbar.\n# \n# Now, you will write your own code to continue investigating properties of the data.\n# \n# **Question 4b**: Bike-share systems are designed for riders to take short trips. Most of the time, users are allowed to take trips of 30 minutes or less with no additional charges, with overage charges made for trips of longer than that duration. What is the average trip length for each city? What proportion of rides made in each city are longer than 30 minutes?\n# \n# **Answer**: \n# - **NYC** : The average Trip Length = 15.81, Propotion of Trips Longer than 30 Minutes = 7.30%\n# - **Chicago**: The average Trip Length = 16.56, Propotion of Trips Longer than 30 Minutes = 8.33%\n# - **Washington**: The average Trip Length = 18.93, Propotion of Trips Longer than 30 Minutes = 10.83%\n\n# In[32]:\n\n\n## Use this and additional cells to answer Question 4b. ##\n## ##\n## HINT: The csv module reads in all of the data as strings, including ##\n## numeric values. You will need a function to convert the strings ##\n## into an appropriate numeric type before you aggregate data. ##\n## TIP: For the Bay Area example, the average trip length is 14 minutes ##\n## and 3.5% of trips are longer than 30 minutes. ##\ndef trip_avg(filename):\n city = filename.split('-')[0].split('/')[-1]\n with open(filename,'r') as f_in:\n trip = csv.DictReader(f_in)\n trips = 0\n trip_time = 0\n trip_exceed = 0\n trip_duration = 0\n \n for row in trip:\n trips += 1\n trip_time = duration_in_mins(row, city)\n if trip_time > 30:\n trip_exceed += 1\n else:\n None\n trip_duration += trip_time\n trip_exceed_percent = (float(trip_exceed/trips))*100 \n trip_avg = float(trip_duration/trips)\n return (city,trip_avg,trip_exceed_percent)\n\n\n# In[34]:\n\n\ndata_file = [ './data/NYC-CitiBike-2016.csv',\n './data/Chicago-Divvy-2016.csv',\n './data/Washington-CapitalBikeshare-2016.csv']\nfor file in data_file:\n print (trip_avg(file))\n\n\n# **Question 4c**: Dig deeper into the question of trip duration based on ridership. Choose one city. Within that city, which type of user takes longer rides on average: Subscribers or Customers?\n# \n# **Answer**: Choosing **NYC** as the city, from that the Subscriber is taking longer ride on average when compared to the customer average duration.\n\n# In[35]:\n\n\n## Use this and additional cells to answer Question 4c. If you have ##\n## not done so yet, consider revising some of your previous code to ##\n## make use of functions for reusability. ##\n## ##\n## TIP: For the Bay Area example data, you should find the average ##\n## Subscriber trip duration to be 9.5 minutes and the average Customer ##\n## trip duration to be 54.6 minutes. Do the other cities have this ##\n## level of difference? 
##\ndef avg_user_type(filename):\n city = filename.split('-')[0].split('/')[-1] \n with open(filename,'r') as f_in:\n data = csv.DictReader(f_in)\n trips = 0 \n subscriber = 0\n customer = 0\n for row in data:\n trips += 1\n if type_of_user(row,city) == 'Subscriber':\n subscriber += duration_in_mins(row, city)\n else: \n customer += duration_in_mins(row, city)\n subscriber_avg = float(subscriber/trips)\n customer_avg = float(customer/trips)\n return (city,subscriber_avg,customer_avg)\n\n\n# In[36]:\n\n\ndata_file = ['./data/NYC-CitiBike-2016.csv',\n './data/Chicago-Divvy-2016.csv',\n './data/Washington-CapitalBikeshare-2016.csv']\nfor file in data_file:\n print(avg_user_type(file))\n\n\n# <a id='visualizations'></a>\n# ### Visualizations\n# \n# The last set of values that you computed should have pulled up an interesting result. While the mean trip time for Subscribers is well under 30 minutes, the mean trip time for Customers is actually _above_ 30 minutes! It will be interesting for us to look at how the trip times are distributed. In order to do this, a new library will be introduced here, `matplotlib`. Run the cell below to load the library and to generate an example plot.\n\n# In[37]:\n\n\n# load library\nimport matplotlib.pyplot as plt\n\n# this is a 'magic word' that allows for plots to be displayed\n# inline with the notebook. If you want to know more, see:\n# http://ipython.readthedocs.io/en/stable/interactive/magics.html\nget_ipython().magic('matplotlib inline')\n\n# example histogram, data taken from bay area sample\ndata = [ 7.65, 8.92, 7.42, 5.50, 16.17, 4.20, 8.98, 9.62, 11.48, 14.33,\n 19.02, 21.53, 3.90, 7.97, 2.62, 2.67, 3.08, 14.40, 12.90, 7.83,\n 25.12, 8.30, 4.93, 12.43, 10.60, 6.17, 10.88, 4.78, 15.15, 3.53,\n 9.43, 13.32, 11.72, 9.85, 5.22, 15.10, 3.95, 3.17, 8.78, 1.88,\n 4.55, 12.68, 12.38, 9.78, 7.63, 6.45, 17.38, 11.90, 11.52, 8.63,]\nplt.hist(data)\nplt.title('Distribution of Trip Durations')\nplt.xlabel('Duration (m)')\nplt.show()\n\n\n# In the above cell, we collected fifty trip times in a list, and passed this list as the first argument to the `.hist()` function. This function performs the computations and creates plotting objects for generating a histogram, but the plot is actually not rendered until the `.show()` function is executed. The `.title()` and `.xlabel()` functions provide some labeling for plot context.\n# \n# You will now use these functions to create a histogram of the trip times for the city you selected in question 4c. Don't separate the Subscribers and Customers for now: just collect all of the trip times and plot them.\n\n# In[38]:\n\n\n## Use this and additional cells to collect all of the trip times as a list ##\n## and then use pyplot functions to generate a histogram of trip times. ##\ndef trip_time(filename):\n city = filename.split('-')[0].split('/')[-1]\n with open(filename,'r') as f_in:\n reader = csv.DictReader(f_in)\n data = []\n for row in reader:\n duration_data = duration_in_mins(row,city)\n data.append(duration_data)\n return data\nfile = './data/NYC-CitiBike-2016.csv'\nduration_plot = trip_time(file)\nplt.hist(duration_plot,bins=30)\nplt.xlim(0,3000)\nplt.title('Trip Duration Of NYC')\nplt.xlabel('Duration (m)')\nplt.show()\n\n\n# If you followed the use of the `.hist()` and `.show()` functions exactly like in the example, you're probably looking at a plot that's completely unexpected. The plot consists of one extremely tall bar on the left, maybe a very short second bar, and a whole lot of empty space in the center and right. 
Take a look at the duration values on the x-axis. This suggests that there are some highly infrequent outliers in the data. Instead of reprocessing the data, you will use additional parameters with the `.hist()` function to limit the range of data that is plotted. Documentation for the function can be found [[here]](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.hist.html#matplotlib.pyplot.hist).\n# \n# **Question 5**: Use the parameters of the `.hist()` function to plot the distribution of trip times for the Subscribers in your selected city. Do the same thing for only the Customers. Add limits to the plots so that only trips of duration less than 75 minutes are plotted. As a bonus, set the plots up so that bars are in five-minute wide intervals. For each group, where is the peak of each distribution? How would you describe the shape of each distribution?\n# \n# **Answer**: \n# \n# **SUBSCRIBER:** The Distribution has a peak from 1 to 10 minutes of duration for nearly 10000 data's and to describe the Shape of Distribution, From the peak on the left side of graph gradually getting decreased to the right side of the Graph.\n# \n# **CUSTOMER:** The Distrbution has a peak some where from 15 to 24 minuntes of duration for nearly 8000 data's\n# and the Shape of Distribution, it has a moderate increase from the lift side of the graph then to the peak and gradually it gets decreased to the right side of the graph.\n\n# In[39]:\n\n\n## Use this and additional cells to answer Question 5. ##\n\ndef trip_time(filename): \n city = filename.split('-')[0].split('/')[-1]\n with open(filename,'r') as f_in:\n reader = csv.DictReader(f_in)\n subscriber = []\n customer = []\n for row in reader:\n if type_of_user(row, city) == 'Subscriber':\n duration_data = duration_in_mins(row,city)\n if duration_data < 75: \n subscriber.append(duration_data)\n else:\n None\n else:\n duration_data = duration_in_mins(row,city)\n if duration_data < 75: \n customer.append(duration_data)\n else:\n None\n return (subscriber,customer)\n\n\n\n\n# In[40]:\n\n\nfile = './data/NYC-CitiBike-2016.csv'\nsubscriber,customer = trip_time(file)\nplt.hist(subscriber,bins=10)\nplt.title('Trip Duration Of NYC Subscriber')\nplt.xlabel('Duration (m)')\nplt.show()\nplt.hist(customer,bins=10)\nplt.title('Trip Duration Of NYC Customer')\nplt.xlabel('Duration (m)')\nplt.show()\n\n\n# <a id='eda_continued'></a>\n# ## Performing Your Own Analysis\n# \n# So far, you've performed an initial exploration into the data available. You have compared the relative volume of trips made between three U.S. cities and the ratio of trips made by Subscribers and Customers. For one of these cities, you have investigated differences between Subscribers and Customers in terms of how long a typical trip lasts. Now it is your turn to continue the exploration in a direction that you choose. Here are a few suggestions for questions to explore:\n# \n# - How does ridership differ by month or season? Which month / season has the highest ridership? Does the ratio of Subscriber trips to Customer trips change depending on the month or season?\n# - Is the pattern of ridership different on the weekends versus weekdays? On what days are Subscribers most likely to use the system? What about Customers? Does the average duration of rides change depending on the day of the week?\n# - During what time of day is the system used the most? 
Is there a difference in usage patterns for Subscribers and Customers?\n# \n# If any of the questions you posed in your answer to question 1 align with the bullet points above, this is a good opportunity to investigate one of them. As part of your investigation, you will need to create a visualization. If you want to create something other than a histogram, then you might want to consult the [Pyplot documentation](https://matplotlib.org/devdocs/api/pyplot_summary.html). In particular, if you are plotting values across a categorical variable (e.g. city, user type), a bar chart will be useful. The [documentation page for `.bar()`](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.bar.html#matplotlib.pyplot.bar) includes links at the bottom of the page with examples for you to build off of for your own use.\n# \n# **Question 6**: Continue the investigation by exploring another question that could be answered by the data available. Document the question you want to explore below. Your investigation should involve at least two variables and should compare at least two groups. You should also use at least one visualization as part of your explorations.\n# \n# **Answer**: \n# ***QUESTION:*** Is the pattern of ridership different on the weekends versus weekdays? On what days are Subscribers most likely to use the system? What about Customers? Does the average duration of rides change depending on the day of the week?\n# \n# ***ANSWER ANALYSIS*** I have a made a analysis for the weekdays and weekends of each type of user says Subscirbers and the Customers, Answering to the **1st** part of the question, the ridership pattern differs based on the weekdays and weekends **2nd** part, Subscriber type user are more on weekdays than on weekends and in Customer type user also Weekday riders are more in count comapred to the weekend riders and the **3rd** part of the question, yes do average duration of rides changes depending on the day of the week.\n# ***On Conclusion*** anaysis of this visual anaysis the weekday riders are more in number, when compared to the weekend users, based on this more bikes and maintainence has to be made on the weekdays to give a better servies and improve the Business on a effective way.\n\n# In[41]:\n\n\n## Use this and additional cells to continue to explore the dataset. 
##\n## Once you have performed your exploration, document your findings ##\nimport numpy as np\n## in the Markdown cell above.##\ndef type_user_analysis(filename):\n city = filename.split('-')[0].split('/')[-1]\n with open(filename,'r') as f_in:\n reader = csv.DictReader(f_in)\n sub_week_day = []\n sub_weekend_days = []\n cust_week_day = []\n cust_weekend_days = []\n trips = 0\n for rows in reader:\n trips += 1\n if type_of_user(rows, city) == 'Subscriber':\n week_days = time_of_trip(rows, city)[2]\n if week_days =='Saturday' or week_days == 'Sunday':\n sub_weekend_days.append(week_days)\n else:\n sub_week_day.append(week_days)\n else: \n week_days = time_of_trip(rows, city)[2]\n if week_days == 'Saturday' or week_days == 'Sunday':\n cust_weekend_days.append(week_days)\n else:\n cust_week_day.append(week_days)\n \n return (sub_week_day,sub_weekend_days,cust_week_day,cust_weekend_days)\n \n\n\n# In[42]:\n\n\ndef dayofweek_avg(filename):\n city = filename.split('-')[0].split('/')[-1]\n with open(filename,'r') as f_in:\n trip = csv.DictReader(f_in)\n trips = 0\n trip_weekday= 0\n trip_weekend = 0\n \n for row in trip:\n trips += 1\n if time_of_trip(row, city)[2] == 'Saturday' or time_of_trip(row, city)[2] == 'Sunday':\n trip_weekend += duration_in_mins(row, city)\n else:\n trip_weekday += duration_in_mins(row, city)\n \n weekday_avg = float(trip_weekday/trips)\n weekend_avg = float(trip_weekend/trips)\n return (weekday_avg,weekend_avg)\n\n\n# In[43]:\n\n\nfile = './data/NYC-CitiBike-2016.csv'\nsub_weekdays , sub_weekends, cust_weekdays, cust_weekends = type_user_analysis(file)\nweekday_avg , weekend_avg = dayofweek_avg(file)\nsubscriber_weekdays = len(sub_weekdays)\nsubscriber_weekends = len(sub_weekends)\ncustomer_weekdays = len(cust_weekdays)\ncustomer_weekends = len(cust_weekends)\n\nsub_object = ('weekdays','weekends')\nsubscriber = [subscriber_weekdays,subscriber_weekends]\ny_pos1 = np.arange(len(sub_object))\n\nplt.bar(y_pos1,subscriber)\nplt.title(\"Subscriber Usage\")\nplt.xticks(y_pos1,sub_object)\nplt.ylabel('Number of Trips')\nplt.show()\n\ncust_object = ('weekdays','weekends')\ncustomer = [customer_weekdays,customer_weekends]\ny_pos2 = np.arange(len(cust_object))\n\nplt.bar(y_pos2,customer)\nplt.title(\"Customer Usage\")\nplt.xticks(y_pos2,cust_object)\nplt.ylabel('Number of Trips')\nplt.show()\n\ntrip_avg = ('weekdays','weekend')\navg = [weekday_avg, weekend_avg]\ny_pos3= np.arange(len(trip_avg))\n\nplt.bar(y_pos3,avg)\nplt.title(\"Average Usage\")\nplt.xticks(y_pos3,trip_avg)\nplt.ylabel('Number of Trips')\nplt.show()\n\n\n# <a id='conclusions'></a>\n# ## Conclusions\n# \n# Congratulations on completing the project! This is only a sampling of the data analysis process: from generating questions, wrangling the data, and to exploring the data. Normally, at this point in the data analysis process, you might want to draw conclusions about the data by performing a statistical test or fitting the data to a model for making predictions. There are also a lot of potential analyses that could be performed on the data which are not possible with only the data provided. For example, detailed location data has not been investigated. Where are the most commonly used docks? What are the most common routes? As another example, weather has potential to have a large impact on daily ridership. How much is ridership impacted when there is rain or snow? 
Are subscribers or customers affected more by changes in weather?\n# \n# **Question 7**: Putting the bike share data aside, think of a topic or field of interest where you would like to be able to apply the techniques of data science. What would you like to be able to learn from your chosen subject?\n# \n# **Answer**: I would like to use this techniques for product based companies and come out with the analysis of products which are most being attracted to the customers and the marketing statergies analysis to those product which would help in using those statergies to make other product reach out to the customers in a similar way like a recommender system to make a much Effective Business Development.\n# \n# > **Tip**: If we want to share the results of our analysis with others, we aren't limited to giving them a copy of the jupyter Notebook (.ipynb) file. We can also export the Notebook output in a form that can be opened even for those without Python installed. From the **File** menu in the upper left, go to the **Download as** submenu. You can then choose a different format that can be viewed more generally, such as HTML (.html) or\n# PDF (.pdf). You may need additional packages or software to perform these exports.\n\n# Documents & Websites Used: \n# - [Python functions](https://docs.python.org/3/library/functions.html)\n# - [Bulid-in Constands](https://docs.python.org/3/library/constants.html)\n# - [Stack OverFlow](https://stackoverflow.com/)\n# - [Tutorial Point](https://www.tutorialspoint.com)\n# - [Matplotlib Bar chart](https://plot.ly/matplotlib/bar-charts/)\n# - [Python Standard Library](https://docs.python.org/2/library/index.html)\n# \n\n# In[ ]:\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.xlim", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
ChengIC/text_classifier
[ "e7265deebb30a3bab1c3e8a4a0a829f659b32e1c" ]
[ "train.py" ]
[ "\n\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pickle\n\ndf= pd.read_csv(\"spam.csv\", encoding=\"latin-1\")\n# print (df.head())\ndf.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)\n# Features and Labels\ndf['label'] = df['v1'].map({'ham': 0, 'spam': 1})\nX = df['v2']\nprint (X)\ny = df['label']\n# Extract Feature With CountVectorizer\ncv = CountVectorizer()\nX = cv.fit_transform(X) # Fit the Data\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n#Naive Bayes Classifier\nfrom sklearn.naive_bayes import MultinomialNB\n\nclf = MultinomialNB()\nclf.fit(X_train,y_train)\nclf.score(X_test,y_test)\n\n# Save the vectorizer\nvec_file = 'vectorizer.pickle'\npickle.dump(cv, open(vec_file, 'wb'))\n# Save the model\nmod_file = 'classification.model'\npickle.dump(clf, open(mod_file, 'wb'))\n\n\n# data = ['lala']\n# vect = cv.transform(data).toarray()\n# my_prediction = clf.predict(vect)\n# print (my_prediction)" ]
[ [ "sklearn.feature_extraction.text.CountVectorizer", "sklearn.naive_bayes.MultinomialNB", "pandas.read_csv", "sklearn.model_selection.train_test_split" ] ]
GitHubEmploy/tsad
[ "63181f40209beb3f943b4a0905513455481f71f4" ]
[ "anomaly_detection/prophet_model.py" ]
[ "from pandas import DataFrame, Series\nfrom fbprophet import Prophet\nimport random\nimport numpy as np\nfrom itertools import product\nimport pandas as pd\nimport threading\nfrom multiprocessing import cpu_count\n\nfrom functions import *\nfrom utils import *\nfrom logger import LoggerProcess\n\n\ndef get_anomaly(fact, yhat_upper, yhat_lower):\n ad = Series([0, 0])\n if fact > yhat_upper:\n ad = Series([1, abs((fact - yhat_upper) / fact)])\n if fact < yhat_lower:\n ad = Series([1, abs((yhat_lower - fact)/ fact)])\n return ad\n\n\ndef get_anomaly_score(anomaly, fact, yhat_upper, yhat_lower):\n if anomaly == 1:\n return abs((fact - yhat_upper) / fact)\n if anomaly == -1:\n return abs((yhat_lower - fact)/ fact)\n\n\ndef get_tuning_params(parameter_tuning, params, job):\n arrays = []\n for p in params:\n if p not in list(parameter_tuning.keys()):\n arrays.append([params[p]])\n else:\n arrays.append(\n np.arange(float(parameter_tuning[p].split(\"*\")[0]),\n float(parameter_tuning[p].split(\"*\")[1]),\n float(parameter_tuning[p].split(\"*\")[0])).tolist()\n )\n comb_arrays = list(product(*arrays))\n if job != 'parameter_tuning':\n return random.sample(comb_arrays, int(len(comb_arrays)*0.5))\n else:\n return comb_arrays\n\n\ndef get_params(params, comb):\n count = 0\n for p in params:\n _p = type(params[p])(comb[count])\n params[p] = _p\n count += 1\n return params\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\nclass TrainProphet:\n def __init__(self,\n job=None, groups=None, time_indicator=None, feature=None,\n data_source=None, data_query_path=None, time_period=None):\n self.job = job\n self.params = hyper_conf('prophet')\n self.combination_params = hyper_conf('prophet_cp')\n self.hyper_params = hyper_conf('prophet_pt')\n self.optimized_parameters = {}\n self._p = None\n self.levels_tuning = get_tuning_params(self.hyper_params, self.params, self.job)\n self.query_date = get_query_date(job, period=time_period, dates=None, params=self.params)\n self.data, self.groups = data_manipulation(job=job,\n date=self.query_date,\n time_indicator=time_indicator,\n feature=feature,\n data_source=data_source,\n groups=groups,\n data_query_path=data_query_path)\n self.date = time_indicator\n self.f_w_data = self.data\n self.split_date = get_split_date(period=time_period, dates=list(self.data[self.date]), params=self.params)\n self.feature = feature\n self.anomaly = []\n self.model = None\n self.count = 1\n self.levels = get_levels(self.data, self.groups)\n self.logger = LoggerProcess(job=job,\n model='prophet',\n total_process=len(self.levels)\n if job != 'parameter_tuning' else len(self.levels_tuning))\n self.comb = None\n self.prediction = None\n\n def get_query(self):\n count = 0\n query = ''\n for c in self.comb:\n if type(c) != str:\n query += self.groups[count] + ' == ' + str(c) + ' and '\n else:\n query += self.groups[count] + \" == '\" + str(c) + \"' and \"\n count += 1\n query = query[:-4]\n return query\n\n def get_related_params(self):\n self._p = self.params if self.combination_params is None else self.combination_params[self.get_param_key()]\n\n def convert_date_feature_column_for_prophet(self):\n renaming = {self.date: 'ds', self.feature: 'y'}\n self.f_w_data = self.f_w_data.rename(columns=renaming)\n self.f_w_data['ds'] = self.f_w_data['ds'].apply(lambda x: datetime.datetime.strptime(str(x)[0:19], '%Y-%m-%d %H:%M:%S'))\n return self.f_w_data\n\n def 
fit_predict_model(self, save_model=True):\n self.f_w_data = self.convert_date_feature_column_for_prophet()\n self.model = Prophet(daily_seasonality=False, yearly_seasonality=False, weekly_seasonality=False,\n seasonality_mode='multiplicative',\n interval_width=float(self._p['interval_width']),\n changepoint_range=float(self._p['changepoint_range']),\n n_changepoints=int(self._p['n_changepoints'])\n ).fit(self.f_w_data[['ds', 'y']])\n if save_model:\n model_from_to_pkl(directory=conf('model_main_path'),\n path=model_path(self.comb, self.groups, 'prophet'),\n model=self.model, is_writing=True)\n\n def detect_anomalies(self):\n self.model = model_from_to_pkl(directory=conf('model_main_path'),\n path=model_path(self.comb, self.groups, 'prophet'))\n try:\n\n self.prediction = self.model.predict(self.convert_date_feature_column_for_prophet())\n self.f_w_data = pd.merge(self.f_w_data,\n self.prediction.rename(columns={'ds': self.date}),\n on=self.date,\n how='left')\n self.f_w_data = self.f_w_data[self.f_w_data[self.date] >= self.split_date]\n self.f_w_data[['ad_label_3', 'anomaly_score_3']] = self.f_w_data.apply(lambda row:\n get_anomaly(row[self.feature],\n row['yhat_upper'],\n row['yhat_lower']), axis=1)\n self.anomaly += self.f_w_data[['ad_label_3', self.date, 'anomaly_score_3'] + self.groups].to_dict(\"results\")\n print(self.f_w_data[['ad_label_3', self.date, 'anomaly_score_3'] + self.groups])\n except Exception as e:\n print(e)\n\n def train_execute(self):\n if not hyper_conf('prophet_has_param_tuning_first_run'):\n self.parameter_tuning()\n for self.comb in self.levels:\n print(\"*\" * 4, \"PROPHET - \", self.get_query().replace(\" and \", \"; \").replace(\" == \", \" - \"), \"*\" * 4)\n self.f_w_data = self.data.query(self.get_query()).sort_values(by=self.date)\n print(\"data size :\", len(self.f_w_data))\n self.convert_date_feature_column_for_prophet()\n self.get_related_params()\n self.fit_predict_model()\n self.logger.counter()\n if not check_request_stoped(self.job):\n break\n\n def prediction_execute(self):\n for self.comb in self.levels:\n print(\"*\" * 4, \"PROPHET - \", self.get_query().replace(\" and \", \"; \").replace(\" == \", \" - \"), \"*\" * 4)\n if check_model_exists(model_path(self.comb, self.groups, 'prophet'), conf('model_main_path')):\n self.f_w_data = self.data.query(self.get_query()).sort_values(by=self.date)\n print(\"prediction size :\", len(self.f_w_data))\n self.detect_anomalies()\n self.logger.counter()\n if not check_request_stoped(self.job):\n break\n self.anomaly = DataFrame(self.anomaly)\n\n def process_execute(self, pr, count):\n self.get_related_params()\n self._p = get_params(self._p, pr)\n print(\"hyper parameters : \", self._p)\n self.convert_date_feature_column_for_prophet()\n self.fit_predict_model(save_model=False)\n self.prediction = self.model.predict(self.convert_date_feature_column_for_prophet())\n error[count] = mean_absolute_percentage_error(self.f_w_data['y'], abs(self.prediction['yhat']))\n\n def parameter_tuning_threading(self, has_comb=True):\n global error\n error = {}\n _optimized_parameters = None\n err = 100000000\n self.f_w_data = self.data.query(self.get_query()).sort_values(by=self.date) if has_comb else self.f_w_data\n self.f_w_data = self.f_w_data[-int(0.1 * len(self.f_w_data)):]\n for iter in range(int(len(self.levels_tuning) / cpu_count())):\n _levels = self.levels_tuning[(iter * cpu_count()):((iter + 1) * cpu_count())]\n for i in range(len(_levels)):\n self.logger.counter()\n process = 
threading.Thread(target=self.process_execute, daemon=True, args=(_levels[i], i, ))\n process.start()\n process.join()\n for i in error:\n if i in list(error.keys()):\n if error[i] < err:\n err = error[i]\n _optimized_parameters = get_params(self.params, _levels[i])\n return _optimized_parameters\n\n def get_param_key(self):\n return \"_\".join([str(i[0]) + \"*\" + str(i[1]) for i in zip(self.groups, self.comb)])\n\n def parameter_tuning(self):\n if len(self.levels) == 0:\n self.optimized_parameters = self.parameter_tuning_threading(has_comb=False)\n else:\n for self.comb in self.levels:\n self.optimized_parameters[self.get_param_key()] = self.parameter_tuning_threading()\n if not check_request_stoped(self.job):\n break\n print(\"updating model parameters\")\n pt_config = read_yaml(conf('docs_main_path'), 'parameter_tunning.yaml')\n pt_config['has_param_tuning_first_run']['prophet'] = True\n _key = 'hyper_parameters' if len(self.levels) == 0 else 'combination_params'\n pt_config[_key]['prophet'] = self.optimized_parameters\n write_yaml(conf('docs_main_path'), \"parameter_tunning.yaml\", pt_config, ignoring_aliases=True)\n self.params = hyper_conf('prophet')\n self.combination_params = hyper_conf('prophet_cp')\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.abs", "numpy.array", "pandas.Series", "pandas.DataFrame" ] ]
muhyun/pytorch-lightning
[ "5fda9da7b415c1812423f27d0d8d89a0b829015c" ]
[ "pytorch_lightning/loggers/tensorboard.py" ]
[ "\"\"\"\nTensorBoard\n-----------\n\"\"\"\n\nimport os\nfrom argparse import Namespace\nfrom typing import Optional, Dict, Union, Any\nfrom warnings import warn\n\nimport torch\nfrom pkg_resources import parse_version\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.core.saving import save_hparams_to_yaml\nfrom pytorch_lightning.loggers.base import LightningLoggerBase\nfrom pytorch_lightning.utilities import rank_zero_only\n\ntry:\n from omegaconf import Container\nexcept ImportError:\n Container = None\n\n\nclass TensorBoardLogger(LightningLoggerBase):\n r\"\"\"\n Log to local file system in `TensorBoard <https://www.tensorflow.org/tensorboard>`_ format.\n Implemented using :class:`~torch.utils.tensorboard.SummaryWriter`. Logs are saved to\n ``os.path.join(save_dir, name, version)``. This is the default logger in Lightning, it comes\n preinstalled.\n\n Example:\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.loggers import TensorBoardLogger\n >>> logger = TensorBoardLogger(\"tb_logs\", name=\"my_model\")\n >>> trainer = Trainer(logger=logger)\n\n Args:\n save_dir: Save directory\n name: Experiment name. Defaults to ``'default'``. If it is the empty string then no per-experiment\n subdirectory is used.\n version: Experiment version. If version is not specified the logger inspects the save\n directory for existing versions, then automatically assigns the next available version.\n If it is a string then it is used as the run-specific subdirectory name,\n otherwise ``'version_${version}'`` is used.\n \\**kwargs: Other arguments are passed directly to the :class:`SummaryWriter` constructor.\n\n \"\"\"\n NAME_HPARAMS_FILE = 'hparams.yaml'\n\n def __init__(self,\n save_dir: str,\n name: Optional[str] = \"default\",\n version: Optional[Union[int, str]] = None,\n **kwargs):\n super().__init__()\n self.save_dir = save_dir\n self._name = name\n self._version = version\n\n self._experiment = None\n self.hparams = {}\n self._kwargs = kwargs\n\n @property\n def root_dir(self) -> str:\n \"\"\"\n Parent directory for all tensorboard checkpoint subdirectories.\n If the experiment name parameter is ``None`` or the empty string, no experiment subdirectory is used\n and the checkpoint will be saved in \"save_dir/version_dir\"\n \"\"\"\n if self.name is None or len(self.name) == 0:\n return self.save_dir\n else:\n return os.path.join(self.save_dir, self.name)\n\n @property\n def log_dir(self) -> str:\n \"\"\"\n The directory for this run's tensorboard checkpoint. By default, it is named\n ``'version_${self.version}'`` but it can be overridden by passing a string value\n for the constructor's version parameter instead of ``None`` or an int.\n \"\"\"\n # create a pseudo standard path ala test-tube\n version = self.version if isinstance(self.version, str) else f\"version_{self.version}\"\n log_dir = os.path.join(self.root_dir, version)\n return log_dir\n\n @property\n def experiment(self) -> SummaryWriter:\n r\"\"\"\n Actual tensorboard object. 
To use TensorBoard features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_tensorboard_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n os.makedirs(self.root_dir, exist_ok=True)\n self._experiment = SummaryWriter(log_dir=self.log_dir, **self._kwargs)\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace],\n metrics: Optional[Dict[str, Any]] = None) -> None:\n params = self._convert_params(params)\n\n # store params to output\n self.hparams.update(params)\n\n # format params into the suitable for tensorboard\n params = self._flatten_dict(params)\n params = self._sanitize_params(params)\n\n if parse_version(torch.__version__) < parse_version(\"1.3.0\"):\n warn(\n f\"Hyperparameter logging is not available for Torch version {torch.__version__}.\"\n \" Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable\"\n \" hyperparameter logging.\"\n )\n else:\n from torch.utils.tensorboard.summary import hparams\n\n if metrics is None:\n metrics = {}\n exp, ssi, sei = hparams(params, metrics)\n writer = self.experiment._get_file_writer()\n writer.add_summary(exp)\n writer.add_summary(ssi)\n writer.add_summary(sei)\n\n if metrics:\n # necessary for hparam comparison with metrics\n self.log_metrics(metrics)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n for k, v in metrics.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n self.experiment.add_scalar(k, v, step)\n\n @rank_zero_only\n def save(self) -> None:\n super().save()\n dir_path = self.log_dir\n if not os.path.isdir(dir_path):\n dir_path = self.save_dir\n\n # prepare the file path\n hparams_file = os.path.join(dir_path, self.NAME_HPARAMS_FILE)\n\n # save the metatags file\n if Container is not None:\n if isinstance(self.hparams, Container):\n from omegaconf import OmegaConf\n OmegaConf.save(self.hparams, hparams_file, resolve=True)\n else:\n save_hparams_to_yaml(hparams_file, self.hparams)\n else:\n save_hparams_to_yaml(hparams_file, self.hparams)\n\n @rank_zero_only\n def finalize(self, status: str) -> None:\n self.save()\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def version(self) -> int:\n if self._version is None:\n self._version = self._get_next_version()\n return self._version\n\n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n\n if not os.path.isdir(root_dir):\n log.warning('Missing logger folder: %s', root_dir)\n return 0\n\n existing_versions = []\n for d in os.listdir(root_dir):\n if os.path.isdir(os.path.join(root_dir, d)) and d.startswith(\"version_\"):\n existing_versions.append(int(d.split(\"_\")[1]))\n\n if len(existing_versions) == 0:\n return 0\n\n return max(existing_versions) + 1\n" ]
[ [ "torch.utils.tensorboard.summary.hparams", "torch.utils.tensorboard.SummaryWriter" ] ]
AYCHALTZ/AlMusicomposer
[ "cc229aae875ca5f30ddfe5699482ab8572c81e9e" ]
[ "util.py" ]
[ "import os\nimport math\nimport cPickle\nfrom collections import defaultdict\nfrom random import shuffle\n\nimport numpy as np\nimport tensorflow as tf \n\nimport midi_util\nimport nottingham_util\n\ndef parse_midi_directory(input_dir, time_step):\n \"\"\" \n input_dir: data directory full of midi files\n time_step: the number of ticks to use as a time step for discretization\n\n Returns a list of [T x D] matrices, where T is the amount of time steps\n and D is the range of notes.\n \"\"\"\n files = [ os.path.join(input_dir, f) for f in os.listdir(input_dir)\n if os.path.isfile(os.path.join(input_dir, f)) ] \n sequences = [ \\\n (f, midi_util.parse_midi_to_sequence(f, time_step=time_step)) \\\n for f in files ]\n\n return sequences\n\ndef batch_data(sequences, time_batch_len=128, max_time_batches=10,\n softmax=False, verbose=False):\n \"\"\"\n sequences: a list of [T x D] matrices, each matrix representing a sequencey\n time_batch_len: the unrolling length that will be used by BPTT. \n max_time_batches: the max amount of time batches to consider. Any sequences \n longert than max_time_batches * time_batch_len will be ignored\n Can be set to -1 to all time batches needed.\n softmax: Flag should be set to true if using the dual-softmax formualtion\n\n returns [\n [ [ data ], [ target ] ], # batch with one time step\n [ [ data1, data2 ], [ target1, target2 ] ], # batch with two time steps\n ...\n ]\n \"\"\"\n\n assert time_batch_len > 0\n\n dims = sequences[0].shape[1]\n sequence_lens = [s.shape[0] for s in sequences]\n\n if verbose:\n avg_seq_len = sum(sequence_lens) / len(sequences)\n print(\"Average Sequence Length: {}\".format(avg_seq_len))\n print(\"Max Sequence Length: {}\".format(time_batch_len))\n print(\"Number of sequences: {}\".format(len(sequences)))\n\n batches = defaultdict(list)\n for sequence in sequences:\n # -1 because we can't predict the first step\n num_time_steps = ((sequence.shape[0]-1) // time_batch_len) \n if num_time_steps < 1:\n continue\n if max_time_batches > 0 and num_time_steps > max_time_batches:\n continue\n batches[num_time_steps].append(sequence)\n\n if verbose:\n print(\"Batch distribution:\")\n print([(k, len(v)) for (k, v) in batches.iteritems()])\n\n def arrange_batch(sequences, num_time_steps):\n sequences = [s[:(num_time_steps*time_batch_len)+1, :] for s in sequences]\n stacked = np.dstack(sequences)\n # swap axes so that shape is (SEQ_LENGTH X BATCH_SIZE X INPUT_DIM)\n data = np.swapaxes(stacked, 1, 2)\n targets = np.roll(data, -1, axis=0)\n # cutoff final time step\n data = data[:-1, :, :]\n targets = targets[:-1, :, :]\n assert data.shape == targets.shape\n\n if softmax:\n r = nottingham_util.NOTTINGHAM_MELODY_RANGE\n labels = np.ones((targets.shape[0], targets.shape[1], 2), dtype=np.int32)\n assert np.all(np.sum(targets[:, :, :r], axis=2) == 1)\n assert np.all(np.sum(targets[:, :, r:], axis=2) == 1)\n labels[:, :, 0] = np.argmax(targets[:, :, :r], axis=2)\n labels[:, :, 1] = np.argmax(targets[:, :, r:], axis=2)\n targets = labels\n assert targets.shape[:2] == data.shape[:2]\n\n assert data.shape[0] == num_time_steps * time_batch_len\n\n # split them up into time batches\n tb_data = np.split(data, num_time_steps, axis=0)\n tb_targets = np.split(targets, num_time_steps, axis=0)\n\n assert len(tb_data) == len(tb_targets) == num_time_steps\n for i in range(len(tb_data)):\n assert tb_data[i].shape[0] == time_batch_len\n assert tb_targets[i].shape[0] == time_batch_len\n if softmax:\n assert np.all(np.sum(tb_data[i], axis=2) == 2)\n\n return (tb_data, 
tb_targets)\n\n return [ arrange_batch(b, n) for n, b in batches.iteritems() ]\n \ndef load_data(data_dir, time_step, time_batch_len, max_time_batches, nottingham=None):\n \"\"\"\n nottingham: The sequences object as created in prepare_nottingham_pickle\n (see nottingham_util for more). If None, parse all the MIDI\n files from data_dir\n time_step: the time_step used to parse midi files (only used if data_dir\n is provided)\n time_batch_len and max_time_batches: see batch_data()\n\n returns { \n \"train\": {\n \"data\": [ batch_data() ],\n \"metadata: { ... }\n },\n \"valid\": { ... }\n \"test\": { ... }\n }\n \"\"\"\n\n data = {}\n for dataset in ['train', 'test', 'valid']:\n\n # For testing, use ALL the sequences\n if dataset == 'test':\n max_time_batches = -1\n\n # Softmax formualation preparsed into sequences\n if nottingham:\n sequences = nottingham[dataset]\n metadata = nottingham[dataset + '_metadata']\n # Cross-entropy formulation needs to be parsed\n else:\n sf = parse_midi_directory(os.path.join(data_dir, dataset), time_step)\n sequences = [s[1] for s in sf]\n files = [s[0] for s in sf]\n metadata = [{\n 'path': f,\n 'name': f.split(\"/\")[-1].split(\".\")[0]\n } for f in files]\n\n dataset_data = batch_data(sequences, time_batch_len, max_time_batches, softmax = True if nottingham else False)\n\n data[dataset] = {\n \"data\": dataset_data,\n \"metadata\": metadata,\n }\n\n data[\"input_dim\"] = dataset_data[0][0][0].shape[2]\n\n return data\n\n\ndef run_epoch(session, model, batches, training=False, testing=False):\n \"\"\"\n session: Tensorflow session object\n model: model object (see model.py)\n batches: data object loaded from util_data()\n\n training: A backpropagation iteration will be performed on the dataset\n if this flag is active\n\n returns average loss per time step over all batches.\n if testing flag is active: returns [ loss, probs ] where is the probability\n values for each note\n \"\"\"\n\n # shuffle batches\n shuffle(batches)\n\n target_tensors = [model.loss, model.final_state]\n if testing:\n target_tensors.append(model.probs)\n batch_probs = defaultdict(list)\n if training:\n target_tensors.append(model.train_step)\n\n losses = []\n for data, targets in batches:\n # save state over unrolling time steps\n batch_size = data[0].shape[1]\n num_time_steps = len(data)\n state = model.get_cell_zero_state(session, batch_size) \n probs = list()\n\n for tb_data, tb_targets in zip(data, targets):\n if testing:\n tbd = tb_data\n tbt = tb_targets\n else:\n # shuffle all the batches of input, state, and target\n batches = tb_data.shape[1]\n permutations = np.random.permutation(batches)\n tbd = np.zeros_like(tb_data)\n tbd[:, np.arange(batches), :] = tb_data[:, permutations, :]\n tbt = np.zeros_like(tb_targets)\n tbt[:, np.arange(batches), :] = tb_targets[:, permutations, :]\n state[np.arange(batches)] = state[permutations]\n\n feed_dict = {\n model.initial_state: state,\n model.seq_input: tbd,\n model.seq_targets: tbt,\n }\n results = session.run(target_tensors, feed_dict=feed_dict)\n\n losses.append(results[0])\n state = results[1]\n if testing:\n batch_probs[num_time_steps].append(results[2])\n\n loss = sum(losses) / len(losses)\n\n if testing:\n return [loss, batch_probs]\n else:\n return loss\n\ndef accuracy(batch_probs, data, num_samples=20):\n \"\"\"\n batch_probs: probs object returned from run_epoch\n data: data object passed into run_epoch\n num_samples: the number of times to sample each note (an average over all\n these samples will be used)\n\n returns the 
accuracy metric according to\n http://ismir2009.ismir.net/proceedings/PS2-21.pdf\n \"\"\"\n\n false_positives, false_negatives, true_positives = 0, 0, 0 \n for _, batch_targets in data:\n num_time_steps = len(batch_data)\n for ts_targets, ts_probs in zip(batch_targets, batch_probs[num_time_steps]):\n\n assert ts_targets.shape == ts_targets.shape\n\n for seq_idx in range(ts_targets.shape[1]):\n for step_idx in range(ts_targets.shape[0]):\n for note_idx, prob in enumerate(ts_probs[step_idx, seq_idx, :]):\n num_occurrences = np.random.binomial(num_samples, prob)\n if ts_targets[step_idx, seq_idx, note_idx] == 0.0:\n false_positives += num_occurrences\n else:\n false_negatives += (num_samples - num_occurrences)\n true_positives += num_occurrences\n \n accuracy = (float(true_positives) / float(true_positives + false_positives + false_negatives)) \n\n print(\"Precision: {}\".format(float(true_positives) / (float(true_positives + false_positives))))\n print(\"Recall: {}\".format(float(true_positives) / (float(true_positives + false_negatives))))\n print(\"Accuracy: {}\".format(accuracy))\n" ]
[ [ "numpy.swapaxes", "numpy.split", "numpy.sum", "numpy.arange", "numpy.dstack", "numpy.ones", "numpy.argmax", "numpy.random.permutation", "numpy.zeros_like", "numpy.random.binomial", "numpy.roll" ] ]
eduardo98m/GiaDog
[ "ecafdaaddd1b3ac5cf09cdfc97fba8087385497c" ]
[ "giadog/src/GymEnvs/TestingFunctions/test_position_orientation.py" ]
[ "\"\"\"\n Authors: Amin Arriaga, Eduardo Lopez\n Project: Graduation Thesis: GIAdog\n\n Function for testing the robot position and orientation.\n\"\"\"\nimport pyBulletPainter as pbp\nimport numpy as np\n\ndef test_position_orientation(robot, first_exec: bool=False):\n \"\"\"\n Test the position and orientation of the robot by creating an arrow \n that starts from the robot and points where the robot is facing, and \n stays that way even while the robot is moving.\n\n Arguments:\n ----------\n first_exec: bool\n if True, the parameters are initialized.\n \"\"\"\n p = robot.client\n r_o = robot.position\n\n\n robot.update_position_orientation()\n # Orientation\n _, pitch, yaw = robot.orientation\n x = np.cos(yaw) * np.cos(pitch)\n y = np.sin(yaw) * np.cos(pitch)\n z = -np.sin(pitch)\n r_f = r_o + np.array([x, y, z])\n\n if first_exec: robot.vector_id = pbp.create_vector(p, r_o, r_f)\n else: pbp.update_vector(p, robot.vector_id, r_o, r_f)" ]
[ [ "numpy.array", "numpy.cos", "numpy.sin" ] ]
JWThacker/Udacity_Intro_Machine_Learning_Tensorflow_Nanodegree
[ "1d2b0db9d92d5491173e1d1052acc005721c0ad1" ]
[ "project_2/workspace/predict.py" ]
[ "import warnings\nwarnings.filterwarnings('ignore')\nimport json\nimport sys\nimport argparse as ap\n\nimport numpy as np\n\nimport tensorflow as tf\ntf.get_logger().setLevel('WARNING')\ntf.autograph.set_verbosity(2)\nimport tensorflow_hub as hub\nimport logging\n\nfrom utils import process_image, predict\n'''\n Predict the flower species given an image of a flower.\n\n params:\n /path/to/image - a path to an image to make a prediction from.\n saved_model - a Keras model saved as a .h5\n --top_k - the top number of classes that image could be.\n --category_names - path to a .json labeling classes to species names.\n'''\ndef main():\n\n # Add and then parse all command line arguments.\n parser = ap.ArgumentParser(usage=('python3 predict.py /path/to/image saved_model '\n '--top_k K --category_names map.json'),\n description=('Predict the species'\n ' of a flower image.'))\n\n parser.add_argument('image_path', type=str, help='Path to an image of a flower')\n\n parser.add_argument('saved_model', type=str, help='A tf.Keras model saved as an .h5')\n\n parser.add_argument('--top_k', type=int, default=1, help=('Number of different'\n ' species probabilities'\n ' will be displayed for'))\n\n parser.add_argument('--category_names', type=str,\n default=None,\n help=('path to a .json file'\n 'containing the mapped'\n 'names of the predicted'\n 'species of flowers'))\n args = parser.parse_args()\n\n # Load saved Keras model\n reloaded_model = tf.keras.models.load_model(args.saved_model, custom_objects={'KerasLayer': hub.KerasLayer})\n\n # predict the species with the corresponding probabilities\n try:\n probs, classes = predict(args.image_path, reloaded_model, args.top_k)\n except FileNotFoundError:\n print('\\n\\n')\n print('Image not found; enter a valid path to an image')\n print('\\n\\n')\n sys.exit()\n else:\n # If --category_names was not empty, map class labels to species names\n if args.category_names:\n species_names = []\n try:\n with open(args.category_names, 'r') as f:\n class_names = json.load(f)\n except FileNotFoundError:\n print('\\n\\n')\n print(f'{args.category_names} not found; enter valid path.')\n print('\\n\\n')\n sys.exit()\n else:\n for i, classs in enumerate(classes):\n species_names.append(class_names[classs])\n results = {name: prob for name, prob in zip(species_names, probs)}\n print('\\n\\n')\n print('Flower Species Name: Probability of species')\n for name in species_names:\n print(name.title(), ': ', results[name])\n print('\\n\\n')\n # Otherwise print the class labels and corresponding probabilities\n else:\n print('\\n\\n')\n results = {classs: prob for classs, prob in zip(classes, probs)}\n print('Class Label: Probability of class')\n for classs in classes:\n print(classs, ': ', results[classs])\n print('\\n\\n')\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.autograph.set_verbosity", "tensorflow.keras.models.load_model", "tensorflow.get_logger" ] ]
zhujun98/karabo_data
[ "68ee19d52cd7f140052d029545a7b6169ec9752a" ]
[ "karabo_data/tests/test_utils.py" ]
[ "import numpy as np\nimport os\nimport pytest\nimport re\nimport tempfile\nfrom testpath import assert_isfile\n\nfrom karabo_data import utils\nfrom karabo_data.utils import QuickView\n\n\ndef test_cbf_conversion(mock_agipd_data, capsys):\n with tempfile.TemporaryDirectory() as td:\n out_file = os.path.join(td, 'out.cbf')\n utils.hdf5_to_cbf(mock_agipd_data, out_file, index=0)\n assert_isfile(out_file)\n\n captured = capsys.readouterr()\n assert re.match(\"Convert .* to .*/out.cbf\", captured.out)\n\n\ndef test_init_quick_view():\n qv = QuickView()\n\n assert qv.data is None\n qv.data = np.empty((1, 1, 1), dtype=np.int8)\n assert len(qv) == 1\n assert qv.pos == 0\n\n with pytest.raises(TypeError) as info:\n qv.data = 4\n\n with pytest.raises(TypeError) as info:\n qv.data = np.empty((1, 1, 1, 1), dtype=np.int8)\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-v\"])\n print(\"Run 'py.test -v -s' to see more output\")\n" ]
[ [ "numpy.empty" ] ]
ronaldokun/isic2019
[ "26d436f7ecd9efbce8834dd01aae02c2a8ad85f6" ]
[ "pt/eval.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torchvision\nfrom torchvision import datasets, models as tv_models\nfrom torch.utils.data import DataLoader\nfrom torchsummary import summary\nimport numpy as np\nimport models\nimport threading\nimport pickle\nfrom pathlib import Path\nimport math\nimport os\nimport sys\nfrom glob import glob\nimport re\nimport gc\nimport importlib\nimport time\nimport csv\nimport sklearn.preprocessing\nimport utils\nfrom sklearn.utils import class_weight\nimport imagesize\n\n# add configuration file\n# Dictionary for model configuration\nmdlParams = {}\n\n# Import machine config\npc_cfg = importlib.import_module('pc_cfgs.'+sys.argv[1])\nmdlParams.update(pc_cfg.mdlParams)\n\n\n# If there is another argument, its which checkpoint should be used\nif len(sys.argv) > 6:\n if 'last' in sys.argv[6]:\n mdlParams['ckpt_name'] = 'checkpoint-'\n else:\n mdlParams['ckpt_name'] = 'checkpoint_best-'\n if 'first' in sys.argv[6]:\n mdlParams['use_first'] = True\nelse:\n mdlParams['ckpt_name'] = 'checkpoint-'\n\n# Set visible devices\nmdlParams['numGPUs']= [[int(s) for s in re.findall(r'\\d+',sys.argv[6])][-1]]\ncuda_str = \"\"\nfor i in range(len(mdlParams['numGPUs'])):\n cuda_str = cuda_str + str(mdlParams['numGPUs'][i])\n if i is not len(mdlParams['numGPUs'])-1:\n cuda_str = cuda_str + \",\"\nprint(\"Devices to use:\",cuda_str)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = cuda_str \n\n# If there is another argument, also use a meta learner\nif len(sys.argv) > 7:\n if 'HAMONLY' in sys.argv[7]:\n mdlParams['eval_on_ham_only'] = True \n\n# Import model config\nmodel_cfg = importlib.import_module('cfgs.'+sys.argv[2])\nmdlParams_model = model_cfg.init(mdlParams)\nmdlParams.update(mdlParams_model)\n\n\n# Path name where model is saved is the fourth argument\nif 'NONE' in sys.argv[5]:\n mdlParams['saveDirBase'] = mdlParams['saveDir'] + sys.argv[2]\nelse:\n mdlParams['saveDirBase'] = sys.argv[5]\n\n# Third is multi crop yes no\nif 'multi' in sys.argv[3]:\n if 'rand' in sys.argv[3]:\n mdlParams['numRandValSeq'] = [int(s) for s in re.findall(r'\\d+',sys.argv[3])][0]\n print(\"Random sequence number\",mdlParams['numRandValSeq'])\n else:\n mdlParams['numRandValSeq'] = 0\n mdlParams['multiCropEval'] = [int(s) for s in re.findall(r'\\d+',sys.argv[3])][-1]\n mdlParams['voting_scheme'] = sys.argv[4]\n if 'scale' in sys.argv[3]:\n print(\"Multi Crop and Scale Eval with crop number:\",mdlParams['multiCropEval'],\" Voting scheme: \",mdlParams['voting_scheme'])\n mdlParams['orderedCrop'] = False\n mdlParams['scale_min'] = [int(s) for s in re.findall(r'\\d+',sys.argv[3])][-2]/100.0\n elif 'determ' in sys.argv[3]:\n # Example application: multideterm5sc3f2\n mdlParams['deterministic_eval'] = True\n mdlParams['numCropPositions'] = [int(s) for s in re.findall(r'\\d+',sys.argv[3])][-3]\n num_scales = [int(s) for s in re.findall(r'\\d+',sys.argv[3])][-2]\n all_scales = [1.0,0.5,0.75,0.25,0.9,0.6,0.4]\n mdlParams['cropScales'] = all_scales[:num_scales]\n mdlParams['cropFlipping'] = [int(s) for s in re.findall(r'\\d+',sys.argv[3])][-1]\n print(\"deterministic eval with crops number\",mdlParams['numCropPositions'],\"scales\",mdlParams['cropScales'],\"flipping\",mdlParams['cropFlipping'])\n mdlParams['multiCropEval'] = mdlParams['numCropPositions']*len(mdlParams['cropScales'])*mdlParams['cropFlipping']\n mdlParams['offset_crop'] = 0.2\n elif 'order' in sys.argv[3]:\n mdlParams['orderedCrop'] = True\n if mdlParams.get('var_im_size',False):\n 
# Crop positions, always choose multiCropEval to be 4, 9, 16, 25, etc.\n mdlParams['cropPositions'] = np.zeros([len(mdlParams['im_paths']),mdlParams['multiCropEval'],2],dtype=np.int64)\n #mdlParams['imSizes'] = np.zeros([len(mdlParams['im_paths']),mdlParams['multiCropEval'],2],dtype=np.int64)\n for u in range(len(mdlParams['im_paths'])):\n height, width = imagesize.get(mdlParams['im_paths'][u])\n if width < mdlParams['input_size'][0]:\n height = int(mdlParams['input_size'][0]/float(width))*height\n width = mdlParams['input_size'][0]\n if height < mdlParams['input_size'][0]:\n width = int(mdlParams['input_size'][0]/float(height))*width\n height = mdlParams['input_size'][0] \n if mdlParams.get('resize_large_ones') is not None:\n if width == mdlParams['large_size'] and height == mdlParams['large_size']:\n width, height = (mdlParams['resize_large_ones'],mdlParams['resize_large_ones']) \n ind = 0\n for i in range(np.int32(np.sqrt(mdlParams['multiCropEval']))):\n for j in range(np.int32(np.sqrt(mdlParams['multiCropEval']))):\n mdlParams['cropPositions'][u,ind,0] = mdlParams['input_size'][0]/2+i*((width-mdlParams['input_size'][1])/(np.sqrt(mdlParams['multiCropEval'])-1))\n mdlParams['cropPositions'][u,ind,1] = mdlParams['input_size'][1]/2+j*((height-mdlParams['input_size'][0])/(np.sqrt(mdlParams['multiCropEval'])-1))\n #mdlParams['imSizes'][u,ind,0] = curr_im_size[0]\n\n ind += 1\n # Sanity checks\n #print(\"Positions\",mdlParams['cropPositions'])\n # Test image sizes\n height = mdlParams['input_size'][0]\n width = mdlParams['input_size'][1]\n for u in range(len(mdlParams['im_paths'])): \n height_test, width_test = imagesize.get(mdlParams['im_paths'][u])\n if width_test < mdlParams['input_size'][0]:\n height_test = int(mdlParams['input_size'][0]/float(width_test))*height_test\n width_test = mdlParams['input_size'][0]\n if height_test < mdlParams['input_size'][0]:\n width_test = int(mdlParams['input_size'][0]/float(height_test))*width_test\n height_test = mdlParams['input_size'][0] \n if mdlParams.get('resize_large_ones') is not None:\n if width_test == mdlParams['large_size'] and height_test == mdlParams['large_size']:\n width_test, height_test = (mdlParams['resize_large_ones'],mdlParams['resize_large_ones']) \n test_im = np.zeros([width_test,height_test]) \n for i in range(mdlParams['multiCropEval']):\n im_crop = test_im[np.int32(mdlParams['cropPositions'][u,i,0]-height/2):np.int32(mdlParams['cropPositions'][u,i,0]-height/2)+height,np.int32(mdlParams['cropPositions'][u,i,1]-width/2):np.int32(mdlParams['cropPositions'][u,i,1]-width/2)+width]\n if im_crop.shape[0] != mdlParams['input_size'][0]:\n print(\"Wrong shape\",im_crop.shape[0],mdlParams['im_paths'][u]) \n if im_crop.shape[1] != mdlParams['input_size'][1]:\n print(\"Wrong shape\",im_crop.shape[1],mdlParams['im_paths'][u]) \n else:\n # Crop positions, always choose multiCropEval to be 4, 9, 16, 25, etc.\n mdlParams['cropPositions'] = np.zeros([mdlParams['multiCropEval'],2],dtype=np.int64)\n if mdlParams['multiCropEval'] == 5:\n numCrops = 4\n elif mdlParams['multiCropEval'] == 7:\n numCrops = 9\n mdlParams['cropPositions'] = np.zeros([9,2],dtype=np.int64)\n else:\n numCrops = mdlParams['multiCropEval']\n ind = 0\n for i in range(np.int32(np.sqrt(numCrops))):\n for j in range(np.int32(np.sqrt(numCrops))):\n mdlParams['cropPositions'][ind,0] = mdlParams['input_size'][0]/2+i*((mdlParams['input_size_load'][0]-mdlParams['input_size'][0])/(np.sqrt(numCrops)-1))\n mdlParams['cropPositions'][ind,1] = 
mdlParams['input_size'][1]/2+j*((mdlParams['input_size_load'][1]-mdlParams['input_size'][1])/(np.sqrt(numCrops)-1))\n ind += 1\n # Add center crop\n if mdlParams['multiCropEval'] == 5:\n mdlParams['cropPositions'][4,0] = mdlParams['input_size_load'][0]/2\n mdlParams['cropPositions'][4,1] = mdlParams['input_size_load'][1]/2 \n if mdlParams['multiCropEval'] == 7: \n mdlParams['cropPositions'] = np.delete(mdlParams['cropPositions'],[3,7],0) \n # Sanity checks\n print(\"Positions val\",mdlParams['cropPositions'])\n # Test image sizes\n test_im = np.zeros(mdlParams['input_size_load'])\n height = mdlParams['input_size'][0]\n width = mdlParams['input_size'][1]\n for i in range(mdlParams['multiCropEval']):\n im_crop = test_im[np.int32(mdlParams['cropPositions'][i,0]-height/2):np.int32(mdlParams['cropPositions'][i,0]-height/2)+height,np.int32(mdlParams['cropPositions'][i,1]-width/2):np.int32(mdlParams['cropPositions'][i,1]-width/2)+width,:]\n print(\"Shape\",i+1,im_crop.shape) \n print(\"Multi Crop with order with crop number:\",mdlParams['multiCropEval'],\" Voting scheme: \",mdlParams['voting_scheme'])\n if 'flip' in sys.argv[3]:\n # additional flipping, example: flip2multiorder16\n mdlParams['eval_flipping'] = [int(s) for s in re.findall(r'\\d+',sys.argv[3])][-2]\n print(\"Additional flipping\",mdlParams['eval_flipping'])\n else:\n print(\"Multi Crop Eval with crop number:\",mdlParams['multiCropEval'],\" Voting scheme: \",mdlParams['voting_scheme'])\n mdlParams['orderedCrop'] = False\nelse:\n mdlParams['multiCropEval'] = 0\n mdlParams['orderedCrop'] = False\n\n# Set training set to eval mode\nmdlParams['trainSetState'] = 'eval'\n\nif mdlParams['numClasses'] == 9 and mdlParams.get('no_c9_eval',False):\n num_classes = mdlParams['numClasses']-1 \nelse:\n num_classes = mdlParams['numClasses']\n# Save results in here\nallData = {}\nallData['f1Best'] = np.zeros([mdlParams['numCV']])\nallData['sensBest'] = np.zeros([mdlParams['numCV'],num_classes])\nallData['specBest'] = np.zeros([mdlParams['numCV'],num_classes])\nallData['accBest'] = np.zeros([mdlParams['numCV']])\nallData['waccBest'] = np.zeros([mdlParams['numCV'],num_classes])\nallData['aucBest'] = np.zeros([mdlParams['numCV'],num_classes])\nallData['convergeTime'] = {}\nallData['bestPred'] = {}\nallData['bestPredMC'] = {}\nallData['targets'] = {}\nallData['extPred'] = {}\nallData['f1Best_meta'] = np.zeros([mdlParams['numCV']])\nallData['sensBest_meta'] = np.zeros([mdlParams['numCV'],num_classes])\nallData['specBest_meta'] = np.zeros([mdlParams['numCV'],num_classes])\nallData['accBest_meta'] = np.zeros([mdlParams['numCV']])\nallData['waccBest_meta'] = np.zeros([mdlParams['numCV'],num_classes])\nallData['aucBest_meta'] = np.zeros([mdlParams['numCV'],num_classes])\n#allData['convergeTime'] = {}\nallData['bestPred_meta'] = {}\nallData['targets_meta'] = {}\n\nif not (len(sys.argv) > 8):\n for cv in range(mdlParams['numCV']):\n # Reset model graph \n importlib.reload(models)\n #importlib.reload(torchvision)\n # Collect model variables\n modelVars = {}\n modelVars['device'] = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(modelVars['device'])\n # Def current CV set\n mdlParams['trainInd'] = mdlParams['trainIndCV'][cv]\n if 'valIndCV' in mdlParams:\n mdlParams['valInd'] = mdlParams['valIndCV'][cv]\n # Def current path for saving stuff\n if 'valIndCV' in mdlParams:\n mdlParams['saveDir'] = mdlParams['saveDirBase'] + '/CVSet' + str(cv)\n else:\n mdlParams['saveDir'] = mdlParams['saveDirBase']\n\n # Potentially calculate setMean to 
subtract\n if mdlParams['subtract_set_mean'] == 1:\n mdlParams['setMean'] = np.mean(mdlParams['images_means'][mdlParams['trainInd'],:],(0))\n print(\"Set Mean\",mdlParams['setMean']) \n\n # Potentially only HAM eval\n if mdlParams.get('eval_on_ham_only',False):\n print(\"Old val inds\",len(mdlParams['valInd']))\n mdlParams['valInd'] = np.intersect1d(mdlParams['valInd'],mdlParams['HAM10000_inds'])\n print(\"New val inds, HAM only\",len(mdlParams['valInd']))\n\n # balance classes\n if mdlParams['balance_classes'] < 3 or mdlParams['balance_classes'] == 7 or mdlParams['balance_classes'] == 11:\n class_weights = class_weight.compute_class_weight('balanced',np.unique(np.argmax(mdlParams['labels_array'][mdlParams['trainInd'],:],1)),np.argmax(mdlParams['labels_array'][mdlParams['trainInd'],:],1)) \n print(\"Current class weights\",class_weights)\n class_weights = class_weights*mdlParams['extra_fac']\n print(\"Current class weights with extra\",class_weights) \n elif mdlParams['balance_classes'] == 3 or mdlParams['balance_classes'] == 4:\n # Split training set by classes\n not_one_hot = np.argmax(mdlParams['labels_array'],1)\n mdlParams['class_indices'] = []\n for i in range(mdlParams['numClasses']):\n mdlParams['class_indices'].append(np.where(not_one_hot==i)[0])\n # Kick out non-trainind indices\n mdlParams['class_indices'][i] = np.setdiff1d(mdlParams['class_indices'][i],mdlParams['valInd'])\n #print(\"Class\",i,mdlParams['class_indices'][i].shape,np.min(mdlParams['class_indices'][i]),np.max(mdlParams['class_indices'][i]),np.sum(mdlParams['labels_array'][np.int64(mdlParams['class_indices'][i]),:],0)) \n elif mdlParams['balance_classes'] == 5 or mdlParams['balance_classes'] == 6 or mdlParams['balance_classes'] == 13:\n # Other class balancing loss\n class_weights = 1.0/np.mean(mdlParams['labels_array'][mdlParams['trainInd'],:],axis=0)\n print(\"Current class weights\",class_weights) \n class_weights = class_weights*mdlParams['extra_fac']\n print(\"Current class weights with extra\",class_weights) \n elif mdlParams['balance_classes'] == 9:\n # Only use HAM indicies for calculation\n print(\"Balance 9\")\n indices_ham = mdlParams['trainInd'][mdlParams['trainInd'] < 25331]\n if mdlParams['numClasses'] == 9:\n class_weights_ = 1.0/np.mean(mdlParams['labels_array'][indices_ham,:8],axis=0)\n #print(\"class before\",class_weights_)\n class_weights = np.zeros([mdlParams['numClasses']])\n class_weights[:8] = class_weights_\n class_weights[-1] = np.max(class_weights_)\n else:\n class_weights = 1.0/np.mean(mdlParams['labels_array'][indices_ham,:],axis=0)\n print(\"Current class weights\",class_weights) \n if isinstance(mdlParams['extra_fac'], float):\n class_weights = np.power(class_weights,mdlParams['extra_fac'])\n else:\n class_weights = class_weights*mdlParams['extra_fac']\n print(\"Current class weights with extra\",class_weights) \n\n\n # Set up dataloaders\n # Meta scaler\n if mdlParams.get('meta_features',None) is not None and mdlParams['scale_features']:\n mdlParams['feature_scaler_meta'] = sklearn.preprocessing.StandardScaler().fit(mdlParams['meta_array'][mdlParams['trainInd'],:]) \n #print(\"scaler mean\",mdlParams['feature_scaler_meta'].mean_,\"var\",mdlParams['feature_scaler_meta'].var_) \n # For train\n dataset_train = utils.ISICDataset(mdlParams, 'trainInd')\n # For val\n dataset_val = utils.ISICDataset(mdlParams, 'valInd')\n if mdlParams['multiCropEval'] > 0:\n modelVars['dataloader_valInd'] = DataLoader(dataset_val, batch_size=mdlParams['multiCropEval'], shuffle=False, num_workers=8, 
pin_memory=True) \n else:\n modelVars['dataloader_valInd'] = DataLoader(dataset_val, batch_size=mdlParams['batchSize'], shuffle=False, num_workers=8, pin_memory=True) \n \n modelVars['dataloader_trainInd'] = DataLoader(dataset_train, batch_size=mdlParams['batchSize'], shuffle=True, num_workers=8, pin_memory=True)\n \n # For test\n if 'testInd' in mdlParams:\n dataset_test = utils.ISICDataset(mdlParams, 'testInd')\n if mdlParams['multiCropEval'] > 0:\n modelVars['dataloader_testInd'] = DataLoader(dataset_test, batch_size=mdlParams['multiCropEval'], shuffle=False, num_workers=8, pin_memory=True) \n else:\n modelVars['dataloader_testInd'] = DataLoader(dataset_test, batch_size=mdlParams['batchSize'], shuffle=False, num_workers=8, pin_memory=True) \n \n \n modelVars['model'] = models.getModel(mdlParams)()\n # Original input size\n #if 'Dense' not in mdlParams['model_type']:\n # print(\"Original input size\",modelVars['model'].input_size)\n #print(modelVars['model'])\n if 'Dense' in mdlParams['model_type']:\n if mdlParams['input_size'][0] != 224:\n modelVars['model'] = utils.modify_densenet_avg_pool(modelVars['model'])\n #print(modelVars['model'])\n num_ftrs = modelVars['model'].classifier.in_features\n modelVars['model'].classifier = nn.Linear(num_ftrs, mdlParams['numClasses'])\n #print(modelVars['model'])\n elif 'dpn' in mdlParams['model_type']:\n num_ftrs = modelVars['model'].classifier.in_channels\n modelVars['model'].classifier = nn.Conv2d(num_ftrs,mdlParams['numClasses'],[1,1])\n #modelVars['model'].add_module('real_classifier',nn.Linear(num_ftrs, mdlParams['numClasses']))\n #print(modelVars['model'])\n elif 'efficient' in mdlParams['model_type']:\n # Do nothing, output is prepared\n num_ftrs = modelVars['model']._fc.in_features\n modelVars['model']._fc = nn.Linear(num_ftrs, mdlParams['numClasses']) \n elif 'wsl' in mdlParams['model_type']:\n num_ftrs = modelVars['model'].fc.in_features\n modelVars['model'].fc = nn.Linear(num_ftrs, mdlParams['numClasses']) \n else:\n num_ftrs = modelVars['model'].last_linear.in_features\n modelVars['model'].last_linear = nn.Linear(num_ftrs, mdlParams['numClasses']) \n # modify model\n if mdlParams.get('meta_features',None) is not None:\n modelVars['model'] = models.modify_meta(mdlParams,modelVars['model']) \n modelVars['model'] = modelVars['model'].to(modelVars['device'])\n #summary(modelVars['model'], (mdlParams['input_size'][2], mdlParams['input_size'][0], mdlParams['input_size'][1]))\n # Loss, with class weighting\n # Loss, with class weighting\n if mdlParams['balance_classes'] == 3 or mdlParams['balance_classes'] == 0 or mdlParams['balance_classes'] == 12:\n modelVars['criterion'] = nn.CrossEntropyLoss()\n elif mdlParams['balance_classes'] == 8:\n modelVars['criterion'] = nn.CrossEntropyLoss(reduce=False)\n elif mdlParams['balance_classes'] == 6 or mdlParams['balance_classes'] == 7:\n modelVars['criterion'] = nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor(class_weights.astype(np.float32)),reduce=False)\n elif mdlParams['balance_classes'] == 10:\n modelVars['criterion'] = utils.FocalLoss(mdlParams['numClasses'])\n elif mdlParams['balance_classes'] == 11:\n modelVars['criterion'] = utils.FocalLoss(mdlParams['numClasses'],alpha=torch.cuda.FloatTensor(class_weights.astype(np.float32)))\n else:\n modelVars['criterion'] = nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor(class_weights.astype(np.float32)))\n\n # Observe that all parameters are being optimized\n modelVars['optimizer'] = optim.Adam(modelVars['model'].parameters(), 
lr=mdlParams['learning_rate'])\n\n # Decay LR by a factor of 0.1 every 7 epochs\n modelVars['scheduler'] = lr_scheduler.StepLR(modelVars['optimizer'], step_size=mdlParams['lowerLRAfter'], gamma=1/np.float32(mdlParams['LRstep']))\n\n # Define softmax\n modelVars['softmax'] = nn.Softmax(dim=1)\n\n # Manually find latest chekcpoint, tf.train.latest_checkpoint is doing weird shit\n files = glob(mdlParams['saveDir']+'/*')\n #print(mdlParams['saveDir'])\n #print(\"Files\",files)\n global_steps = np.zeros([len(files)])\n for i in range(len(files)):\n # Use meta files to find the highest index\n if 'checkpoint' not in files[i]:\n continue\n if mdlParams['ckpt_name'] not in files[i]:\n continue\n # Extract global step\n nums = [int(s) for s in re.findall(r'\\d+',files[i])]\n global_steps[i] = nums[-1]\n # Create path with maximum global step found, if first is not wanted\n global_steps = np.sort(global_steps)\n if mdlParams.get('use_first') is not None:\n chkPath = mdlParams['saveDir'] + '/' + mdlParams['ckpt_name'] + str(int(global_steps[-2])) + '.pt'\n else:\n chkPath = mdlParams['saveDir'] + '/' + mdlParams['ckpt_name'] + str(int(np.max(global_steps))) + '.pt'\n print(\"Restoring: \",chkPath)\n # Load\n state = torch.load(chkPath)\n # Initialize model and optimizer\n modelVars['model'].load_state_dict(state['state_dict'])\n #modelVars['optimizer'].load_state_dict(state['optimizer']) \n # Construct pkl filename: config name, last/best, saved epoch number\n pklFileName = sys.argv[2] + \"_\" + sys.argv[6] + \"_\" + str(int(np.max(global_steps))) + \".pkl\"\n modelVars['model'].eval()\n if mdlParams['classification']:\n print(\"CV Set \",cv+1)\n print(\"------------------------------------\")\n # Training err first, deactivated\n if 'trainInd' in mdlParams and False:\n loss, accuracy, sensitivity, specificity, conf_matrix, f1, auc, waccuracy, predictions, targets, _ = utils.getErrClassification_mgpu(mdlParams, 'trainInd', modelVars)\n print(\"Training Results:\")\n print(\"----------------------------------\")\n print(\"Loss\",np.mean(loss))\n print(\"F1 Score\",f1) \n print(\"Sensitivity\",sensitivity)\n print(\"Specificity\",specificity)\n print(\"Accuracy\",accuracy)\n print(\"Per Class Accuracy\",waccuracy)\n print(\"Weighted Accuracy\",waccuracy)\n print(\"AUC\",auc)\n print(\"Mean AUC\", np.mean(auc)) \n if 'valInd' in mdlParams and (len(sys.argv) <= 8):\n loss, accuracy, sensitivity, specificity, conf_matrix, f1, auc, waccuracy, predictions, targets, predictions_mc = utils.getErrClassification_mgpu(mdlParams, 'valInd', modelVars)\n print(\"Validation Results:\")\n print(\"----------------------------------\")\n print(\"Loss\",np.mean(loss))\n print(\"F1 Score\",f1) \n print(\"Sensitivity\",sensitivity)\n print(\"Specificity\",specificity)\n print(\"Accuracy\",accuracy)\n print(\"Per Class Accuracy\",waccuracy)\n print(\"Weighted Accuracy\",np.mean(waccuracy))\n print(\"AUC\",auc)\n print(\"Mean AUC\", np.mean(auc)) \n # Save results in dict\n if 'testInd' not in mdlParams:\n allData['f1Best'][cv] = f1\n allData['sensBest'][cv,:] = sensitivity\n allData['specBest'][cv,:] = specificity\n allData['accBest'][cv] = accuracy\n allData['waccBest'][cv,:] = waccuracy\n allData['aucBest'][cv,:] = auc \n allData['bestPred'][cv] = predictions\n allData['bestPredMC'][cv] = predictions_mc\n allData['targets'][cv] = targets \n print(\"Pred shape\",predictions.shape,\"Tar shape\",targets.shape)\n if 'testInd' in mdlParams: \n loss, accuracy, sensitivity, specificity, conf_matrix, f1, auc, waccuracy, 
predictions, targets, predictions_mc = utils.getErrClassification_mgpu(mdlParams, 'testInd', modelVars)\n print(\"Test Results Normal:\")\n print(\"----------------------------------\")\n print(\"Loss\",np.mean(loss))\n print(\"F1 Score\",f1) \n print(\"Sensitivity\",sensitivity)\n print(\"Specificity\",specificity)\n print(\"Accuracy\",accuracy)\n print(\"Per Class Accuracy\",waccuracy)\n print(\"Weighted Accuracy\",np.mean(waccuracy))\n print(\"AUC\",auc)\n print(\"Mean AUC\", np.mean(auc)) \n # Save results in dict\n allData['f1Best'][cv] = f1\n allData['sensBest'][cv,:] = sensitivity\n allData['specBest'][cv,:] = specificity\n allData['accBest'][cv] = accuracy\n allData['waccBest'][cv,:] = waccuracy\n allData['aucBest'][cv,:] = auc \n else:\n # TODO: Regression\n print(\"Not Implemented\") \n# If there is an 8th argument, make extra evaluation for external set\nif len(sys.argv) > 8:\n for cv in range(mdlParams['numCV']):\n # Reset model graph \n importlib.reload(models)\n #importlib.reload(torchvision)\n # Collect model variables\n modelVars = {}\n modelVars['device'] = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \n # define new folder, take care that there might be no labels\n print(\"Creating predictions for path \",sys.argv[8])\n # Add meta data\n if mdlParams.get('meta_features',None) is not None:\n mdlParams['meta_dict'] = {}\n path1 = mdlParams['dataDir'] + '/meta_data/test_rez3_ll/meta_data_test.pkl'\n # Open and load\n with open(path1,'rb') as f:\n meta_data = pickle.load(f)\n # Write into dict\n for k in range(len(meta_data['im_name'])):\n feature_vector = []\n if 'age_oh' in mdlParams['meta_features']:\n if mdlParams['encode_nan']:\n feature_vector.append(meta_data['age_oh'][k,:])\n else:\n feature_vector.append(meta_data['age_oh'][k,1:])\n if 'age_num' in mdlParams['meta_features']:\n feature_vector.append(np.array([meta_data['age_num'][k]])) \n if 'loc_oh' in mdlParams['meta_features']:\n if mdlParams['encode_nan']:\n feature_vector.append(meta_data['loc_oh'][k,:])\n else:\n feature_vector.append(meta_data['loc_oh'][k,1:])\n if 'sex_oh' in mdlParams['meta_features']:\n if mdlParams['encode_nan']:\n feature_vector.append(meta_data['sex_oh'][k,:])\n else:\n feature_vector.append(meta_data['sex_oh'][k,1:]) \n\n #print(feature_vector) \n feature_vector = np.concatenate(feature_vector,axis=0)\n #print(\"feature vector shape\",feature_vector.shape) \n mdlParams['meta_dict'][meta_data['im_name'][k]] = feature_vector \n # Define the path\n path1 = sys.argv[8]\n # All files in that set\n files = sorted(glob(path1+'/*'))\n # Define new paths\n mdlParams['im_paths'] = []\n mdlParams['meta_list'] = []\n for j in range(len(files)):\n inds = [int(s) for s in re.findall(r'\\d+',files[j])]\n if 'ISIC_' in files[j]:\n mdlParams['im_paths'].append(files[j])\n if mdlParams.get('meta_features',None) is not None:\n for key in mdlParams['meta_dict']:\n if key in files[j]:\n mdlParams['meta_list'].append(mdlParams['meta_dict'][key]) \n if mdlParams.get('meta_features',None) is not None:\n # Meta data\n mdlParams['meta_array'] = np.array(mdlParams['meta_list']) \n # Add empty labels\n mdlParams['labels_array'] = np.zeros([len(mdlParams['im_paths']),mdlParams['numClasses']],dtype=np.float32)\n # Define everything as a valind set\n mdlParams['valInd'] = np.array(np.arange(len(mdlParams['im_paths'])))\n mdlParams['trainInd'] = mdlParams['valInd']\n if mdlParams.get('var_im_size',False):\n # Crop positions, always choose multiCropEval to be 4, 9, 16, 25, etc.\n 
mdlParams['cropPositions'] = np.zeros([len(mdlParams['im_paths']),mdlParams['multiCropEval'],2],dtype=np.int64)\n #mdlParams['imSizes'] = np.zeros([len(mdlParams['im_paths']),mdlParams['multiCropEval'],2],dtype=np.int64)\n for u in range(len(mdlParams['im_paths'])):\n height, width = imagesize.get(mdlParams['im_paths'][u])\n if width < mdlParams['input_size'][0]:\n height = int(mdlParams['input_size'][0]/float(width))*height\n width = mdlParams['input_size'][0]\n if height < mdlParams['input_size'][0]:\n width = int(mdlParams['input_size'][0]/float(height))*width\n height = mdlParams['input_size'][0] \n if mdlParams.get('resize_large_ones') is not None:\n if width == mdlParams['large_size'] and height == mdlParams['large_size']:\n width, height = (mdlParams['resize_large_ones'],mdlParams['resize_large_ones']) \n ind = 0\n for i in range(np.int32(np.sqrt(mdlParams['multiCropEval']))):\n for j in range(np.int32(np.sqrt(mdlParams['multiCropEval']))):\n mdlParams['cropPositions'][u,ind,0] = mdlParams['input_size'][0]/2+i*((width-mdlParams['input_size'][1])/(np.sqrt(mdlParams['multiCropEval'])-1))\n mdlParams['cropPositions'][u,ind,1] = mdlParams['input_size'][1]/2+j*((height-mdlParams['input_size'][0])/(np.sqrt(mdlParams['multiCropEval'])-1))\n #mdlParams['imSizes'][u,ind,0] = curr_im_size[0]\n\n ind += 1\n # Sanity checks\n #print(\"Positions\",mdlParams['cropPositions'])\n # Test image sizes\n test_im = np.zeros(mdlParams['input_size_load'])\n height = mdlParams['input_size'][0]\n width = mdlParams['input_size'][1]\n for u in range(len(mdlParams['im_paths'])): \n height_test, width_test = imagesize.get(mdlParams['im_paths'][u])\n if width_test < mdlParams['input_size'][0]:\n height_test = int(mdlParams['input_size'][0]/float(width_test))*height_test\n width_test = mdlParams['input_size'][0]\n if height_test < mdlParams['input_size'][0]:\n width_test = int(mdlParams['input_size'][0]/float(height_test))*width_test\n height_test = mdlParams['input_size'][0] \n if mdlParams.get('resize_large_ones') is not None:\n if width_test == mdlParams['large_size'] and height_test == mdlParams['large_size']:\n width_test, height_test = (mdlParams['resize_large_ones'],mdlParams['resize_large_ones']) \n test_im = np.zeros([width_test,height_test]) \n for i in range(mdlParams['multiCropEval']):\n im_crop = test_im[np.int32(mdlParams['cropPositions'][u,i,0]-height/2):np.int32(mdlParams['cropPositions'][u,i,0]-height/2)+height,np.int32(mdlParams['cropPositions'][u,i,1]-width/2):np.int32(mdlParams['cropPositions'][u,i,1]-width/2)+width]\n if im_crop.shape[0] != mdlParams['input_size'][0]:\n print(\"Wrong shape\",im_crop.shape[0],mdlParams['im_paths'][u]) \n if im_crop.shape[1] != mdlParams['input_size'][1]:\n print(\"Wrong shape\",im_crop.shape[1],mdlParams['im_paths'][u]) \n mdlParams['saveDir'] = mdlParams['saveDirBase'] + '/CVSet' + str(cv)\n # balance classes\n if mdlParams['balance_classes'] < 3 or mdlParams['balance_classes'] == 7 or mdlParams['balance_classes'] == 11:\n class_weights = class_weight.compute_class_weight('balanced',np.unique(np.argmax(mdlParams['labels_array'][mdlParams['trainInd'],:],1)),np.argmax(mdlParams['labels_array'][mdlParams['trainInd'],:],1)) \n print(\"Current class weights\",class_weights)\n class_weights = class_weights*mdlParams['extra_fac']\n print(\"Current class weights with extra\",class_weights) \n elif mdlParams['balance_classes'] == 3 or mdlParams['balance_classes'] == 4:\n # Split training set by classes\n not_one_hot = np.argmax(mdlParams['labels_array'],1)\n 
mdlParams['class_indices'] = []\n for i in range(mdlParams['numClasses']):\n mdlParams['class_indices'].append(np.where(not_one_hot==i)[0])\n # Kick out non-trainind indices\n mdlParams['class_indices'][i] = np.setdiff1d(mdlParams['class_indices'][i],mdlParams['valInd'])\n #print(\"Class\",i,mdlParams['class_indices'][i].shape,np.min(mdlParams['class_indices'][i]),np.max(mdlParams['class_indices'][i]),np.sum(mdlParams['labels_array'][np.int64(mdlParams['class_indices'][i]),:],0)) \n elif mdlParams['balance_classes'] == 5 or mdlParams['balance_classes'] == 6 or mdlParams['balance_classes'] == 13:\n # Other class balancing loss\n class_weights = 1.0/np.mean(mdlParams['labels_array'][mdlParams['trainInd'],:],axis=0)\n print(\"Current class weights\",class_weights) \n class_weights = class_weights*mdlParams['extra_fac']\n print(\"Current class weights with extra\",class_weights) \n elif mdlParams['balance_classes'] == 9:\n # Only use official indicies for calculation\n print(\"Balance 9\")\n indices_ham = mdlParams['trainInd'][mdlParams['trainInd'] < 25331]\n if mdlParams['numClasses'] == 9:\n class_weights_ = 1.0/np.mean(mdlParams['labels_array'][indices_ham,:8],axis=0)\n #print(\"class before\",class_weights_)\n class_weights = np.zeros([mdlParams['numClasses']])\n class_weights[:8] = class_weights_\n class_weights[-1] = np.max(class_weights_)\n else:\n class_weights = 1.0/np.mean(mdlParams['labels_array'][indices_ham,:],axis=0)\n print(\"Current class weights\",class_weights) \n if isinstance(mdlParams['extra_fac'], float):\n class_weights = np.power(class_weights,mdlParams['extra_fac'])\n else:\n class_weights = class_weights*mdlParams['extra_fac']\n print(\"Current class weights with extra\",class_weights) \n\n\n # Set up dataloaders\n # Meta scaler\n if mdlParams.get('meta_features',None) is not None and mdlParams['scale_features']:\n mdlParams['feature_scaler_meta'] = sklearn.preprocessing.StandardScaler().fit(mdlParams['meta_array'][mdlParams['trainInd'],:]) \n #print(\"scaler mean\",mdlParams['feature_scaler_meta'].mean_,\"var\",mdlParams['feature_scaler_meta'].var_) \n # For train\n dataset_train = utils.ISICDataset(mdlParams, 'trainInd')\n # For val\n dataset_val = utils.ISICDataset(mdlParams, 'valInd')\n if mdlParams['multiCropEval'] > 0:\n modelVars['dataloader_valInd'] = DataLoader(dataset_val, batch_size=mdlParams['multiCropEval'], shuffle=False, num_workers=8, pin_memory=True) \n else:\n modelVars['dataloader_valInd'] = DataLoader(dataset_val, batch_size=mdlParams['batchSize'], shuffle=False, num_workers=8, pin_memory=True) \n modelVars['dataloader_trainInd'] = DataLoader(dataset_train, batch_size=mdlParams['batchSize'], shuffle=True, num_workers=8, pin_memory=True)\n \n\n # Define model \n modelVars['model'] = models.getModel(mdlParams)() \n if 'Dense' in mdlParams['model_type']:\n if mdlParams['input_size'][0] != 224:\n modelVars['model'] = utils.modify_densenet_avg_pool(modelVars['model'])\n #print(modelVars['model'])\n num_ftrs = modelVars['model'].classifier.in_features\n modelVars['model'].classifier = nn.Linear(num_ftrs, mdlParams['numClasses'])\n #print(modelVars['model'])\n elif 'dpn' in mdlParams['model_type']:\n num_ftrs = modelVars['model'].classifier.in_channels\n modelVars['model'].classifier = nn.Conv2d(num_ftrs,mdlParams['numClasses'],[1,1])\n #modelVars['model'].add_module('real_classifier',nn.Linear(num_ftrs, mdlParams['numClasses']))\n #print(modelVars['model'])\n elif 'efficient' in mdlParams['model_type']:\n # Do nothing, output is prepared\n num_ftrs = 
modelVars['model']._fc.in_features\n modelVars['model']._fc = nn.Linear(num_ftrs, mdlParams['numClasses']) \n elif 'wsl' in mdlParams['model_type']:\n num_ftrs = modelVars['model'].fc.in_features\n modelVars['model'].fc = nn.Linear(num_ftrs, mdlParams['numClasses']) \n else:\n num_ftrs = modelVars['model'].last_linear.in_features\n modelVars['model'].last_linear = nn.Linear(num_ftrs, mdlParams['numClasses']) \n # modify model\n if mdlParams.get('meta_features',None) is not None:\n modelVars['model'] = models.modify_meta(mdlParams,modelVars['model']) \n modelVars['model'] = modelVars['model'].to(modelVars['device'])\n #summary(modelVars['model'], (mdlParams['input_size'][2], mdlParams['input_size'][0], mdlParams['input_size'][1]))\n # Loss, with class weighting\n # Loss, with class weighting\n if mdlParams['balance_classes'] == 3 or mdlParams['balance_classes'] == 0 or mdlParams['balance_classes'] == 12:\n modelVars['criterion'] = nn.CrossEntropyLoss()\n elif mdlParams['balance_classes'] == 8:\n modelVars['criterion'] = nn.CrossEntropyLoss(reduce=False)\n elif mdlParams['balance_classes'] == 6 or mdlParams['balance_classes'] == 7:\n modelVars['criterion'] = nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor(class_weights.astype(np.float32)),reduce=False)\n elif mdlParams['balance_classes'] == 10:\n modelVars['criterion'] = utils.FocalLoss(mdlParams['numClasses'])\n elif mdlParams['balance_classes'] == 11:\n modelVars['criterion'] = utils.FocalLoss(mdlParams['numClasses'],alpha=torch.cuda.FloatTensor(class_weights.astype(np.float32)))\n else:\n modelVars['criterion'] = nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor(class_weights.astype(np.float32)))\n # Observe that all parameters are being optimized\n modelVars['optimizer'] = optim.Adam(modelVars['model'].parameters(), lr=mdlParams['learning_rate'])\n\n # Decay LR by a factor of 0.1 every 7 epochs\n modelVars['scheduler'] = lr_scheduler.StepLR(modelVars['optimizer'], step_size=mdlParams['lowerLRAfter'], gamma=1/np.float32(mdlParams['LRstep']))\n\n # Define softmax\n modelVars['softmax'] = nn.Softmax(dim=1)\n\n # Manually find latest chekcpoint, tf.train.latest_checkpoint is doing weird shit\n files = glob(mdlParams['saveDir']+'/*')\n global_steps = np.zeros([len(files)])\n for i in range(len(files)):\n # Use meta files to find the highest index\n if 'checkpoint' not in files[i]:\n continue\n if mdlParams['ckpt_name'] not in files[i]:\n continue\n # Extract global step\n nums = [int(s) for s in re.findall(r'\\d+',files[i])]\n global_steps[i] = nums[-1]\n # Create path with maximum global step found, if first is not wanted\n global_steps = np.sort(global_steps)\n if mdlParams.get('use_first') is not None:\n chkPath = mdlParams['saveDir'] + '/' + mdlParams['ckpt_name'] + str(int(global_steps[-2])) + '.pt'\n else:\n chkPath = mdlParams['saveDir'] + '/' + mdlParams['ckpt_name'] + str(int(np.max(global_steps))) + '.pt'\n print(\"Restoring: \",chkPath)\n \n # Load\n state = torch.load(chkPath)\n # Initialize model and optimizer\n modelVars['model'].load_state_dict(state['state_dict'])\n #modelVars['optimizer'].load_state_dict(state['optimizer']) \n # Get predictions or learn on pred\n modelVars['model'].eval() \n # Get predictions\n # Turn off the skipping of the last class\n mdlParams['no_c9_eval'] = False\n loss, accuracy, sensitivity, specificity, conf_matrix, f1, auc, waccuracy, predictions, targets, predictions_mc = utils.getErrClassification_mgpu(mdlParams, 'valInd', modelVars)\n # Save predictions \n allData['extPred'][cv] = predictions\n 
print(\"extPred shape\",allData['extPred'][cv].shape)\n pklFileName = sys.argv[2] + \"_\" + sys.argv[6] + \"_\" + str(int(np.max(global_steps))) + \"_predn.pkl\"\n\n# Mean results over all folds\nnp.set_printoptions(precision=4)\nprint(\"-------------------------------------------------\")\nprint(\"Mean over all Folds\")\nprint(\"-------------------------------------------------\")\nprint(\"F1 Score\",np.array([np.mean(allData['f1Best'])]),\"+-\",np.array([np.std(allData['f1Best'])])) \nprint(\"Sensitivtiy\",np.mean(allData['sensBest'],0),\"+-\",np.std(allData['sensBest'],0)) \nprint(\"Specificity\",np.mean(allData['specBest'],0),\"+-\",np.std(allData['specBest'],0)) \nprint(\"Mean Specificity\",np.array([np.mean(allData['specBest'])]),\"+-\",np.array([np.std(np.mean(allData['specBest'],1))])) \nprint(\"Accuracy\",np.array([np.mean(allData['accBest'])]),\"+-\",np.array([np.std(allData['accBest'])])) \nprint(\"Per Class Accuracy\",np.mean(allData['waccBest'],0),\"+-\",np.std(allData['waccBest'],0))\nprint(\"Weighted Accuracy\",np.array([np.mean(allData['waccBest'])]),\"+-\",np.array([np.std(np.mean(allData['waccBest'],1))])) \nprint(\"AUC\",np.mean(allData['aucBest'],0),\"+-\",np.std(allData['aucBest'],0)) \nprint(\"Mean AUC\",np.array([np.mean(allData['aucBest'])]),\"+-\",np.array([np.std(np.mean(allData['aucBest'],1))])) \n# Save dict with results\nwith open(mdlParams['saveDirBase'] + \"/\" + pklFileName, 'wb') as f:\n pickle.dump(allData, f, pickle.HIGHEST_PROTOCOL) \n" ]
[ [ "torch.nn.Softmax", "numpy.sqrt", "torch.load", "torch.utils.data.DataLoader", "numpy.concatenate", "numpy.max", "numpy.mean", "torch.cuda.is_available", "numpy.where", "torch.nn.CrossEntropyLoss", "numpy.intersect1d", "numpy.std", "numpy.argmax", "numpy.float32", "numpy.zeros", "numpy.power", "torch.nn.Conv2d", "torch.nn.Linear", "numpy.delete", "numpy.array", "numpy.set_printoptions", "numpy.int32", "numpy.sort", "numpy.setdiff1d" ] ]
PhDittmann/carla
[ "f114dc0aa187c5e0abad04731ba1e1d7b32102f0" ]
[ "PythonAPI/examples/manual_control.py" ]
[ "#!/usr/bin/env python\n\n# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\n# Allows controlling a vehicle with a keyboard. For a simpler and more\n# documented example, please take a look at tutorial.py.\n\n\"\"\"\nWelcome to CARLA manual control.\n\nUse ARROWS or WASD keys for control.\n\n W : throttle\n S : brake\n A/D : steer left/right\n Q : toggle reverse\n Space : hand-brake\n P : toggle autopilot\n M : toggle manual transmission\n ,/. : gear up/down\n\n L : toggle next light type\n SHIFT + L : toggle high beam\n Z/X : toggle right/left blinker\n I : toggle interior light\n\n TAB : change sensor position\n ` or N : next sensor\n [1-9] : change to sensor [1-9]\n G : toggle radar visualization\n C : change weather (Shift+C reverse)\n Backspace : change vehicle\n\n R : toggle recording images to disk\n\n CTRL + R : toggle recording of simulation (replacing any previous)\n CTRL + P : start replaying last recorded simulation\n CTRL + + : increments the start time of the replay by 1 second (+SHIFT = 10 seconds)\n CTRL + - : decrements the start time of the replay by 1 second (+SHIFT = 10 seconds)\n\n F1 : toggle HUD\n H/? : toggle help\n ESC : quit\n\"\"\"\n\nfrom __future__ import print_function\n\n\n# ==============================================================================\n# -- find carla module ---------------------------------------------------------\n# ==============================================================================\n\n\nimport glob\nimport os\nimport sys\n\ntry:\n sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n\n\n# ==============================================================================\n# -- imports -------------------------------------------------------------------\n# ==============================================================================\n\n\nimport carla\n\nfrom carla import ColorConverter as cc\n\nimport argparse\nimport collections\nimport datetime\nimport logging\nimport math\nimport random\nimport re\nimport weakref\n\ntry:\n import pygame\n from pygame.locals import KMOD_CTRL\n from pygame.locals import KMOD_SHIFT\n from pygame.locals import K_0\n from pygame.locals import K_9\n from pygame.locals import K_BACKQUOTE\n from pygame.locals import K_BACKSPACE\n from pygame.locals import K_COMMA\n from pygame.locals import K_DOWN\n from pygame.locals import K_ESCAPE\n from pygame.locals import K_F1\n from pygame.locals import K_LEFT\n from pygame.locals import K_PERIOD\n from pygame.locals import K_RIGHT\n from pygame.locals import K_SLASH\n from pygame.locals import K_SPACE\n from pygame.locals import K_TAB\n from pygame.locals import K_UP\n from pygame.locals import K_a\n from pygame.locals import K_c\n from pygame.locals import K_g\n from pygame.locals import K_d\n from pygame.locals import K_h\n from pygame.locals import K_m\n from pygame.locals import K_n\n from pygame.locals import K_p\n from pygame.locals import K_q\n from pygame.locals import K_r\n from pygame.locals import K_s\n from pygame.locals import K_w\n from pygame.locals import K_l\n from pygame.locals import K_i\n from pygame.locals import K_z\n from pygame.locals import K_x\n from pygame.locals import K_MINUS\n from pygame.locals import 
K_EQUALS\nexcept ImportError:\n raise RuntimeError('cannot import pygame, make sure pygame package is installed')\n\ntry:\n import numpy as np\nexcept ImportError:\n raise RuntimeError('cannot import numpy, make sure numpy package is installed')\n\n\n# ==============================================================================\n# -- Global functions ----------------------------------------------------------\n# ==============================================================================\n\n\ndef find_weather_presets():\n rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')\n name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))\n presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]\n return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]\n\n\ndef get_actor_display_name(actor, truncate=250):\n name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:])\n return (name[:truncate - 1] + u'\\u2026') if len(name) > truncate else name\n\n\n# ==============================================================================\n# -- World ---------------------------------------------------------------------\n# ==============================================================================\n\n\nclass World(object):\n def __init__(self, carla_world, hud, args):\n self.world = carla_world\n self.actor_role_name = args.rolename\n try:\n self.map = self.world.get_map()\n except RuntimeError as error:\n print('RuntimeError: {}'.format(error))\n print(' The server could not send the OpenDRIVE (.xodr) file:')\n print(' Make sure it exists, has the same name of your town, and is correct.')\n sys.exit(1)\n self.hud = hud\n self.player = None\n self.collision_sensor = None\n self.lane_invasion_sensor = None\n self.gnss_sensor = None\n self.imu_sensor = None\n self.radar_sensor = None\n self.camera_manager = None\n self._weather_presets = find_weather_presets()\n self._weather_index = 0\n self._actor_filter = args.filter\n self._gamma = args.gamma\n self.restart()\n self.world.on_tick(hud.on_world_tick)\n self.recording_enabled = False\n self.recording_start = 0\n\n def restart(self):\n self.player_max_speed = 1.589\n self.player_max_speed_fast = 3.713\n # Keep same camera config if the camera manager exists.\n cam_index = self.camera_manager.index if self.camera_manager is not None else 0\n cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0\n # Get a random blueprint.\n blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))\n blueprint.set_attribute('role_name', self.actor_role_name)\n if blueprint.has_attribute('color'):\n color = random.choice(blueprint.get_attribute('color').recommended_values)\n blueprint.set_attribute('color', color)\n if blueprint.has_attribute('driver_id'):\n driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)\n blueprint.set_attribute('driver_id', driver_id)\n if blueprint.has_attribute('is_invincible'):\n blueprint.set_attribute('is_invincible', 'true')\n # set the max speed\n if blueprint.has_attribute('speed'):\n self.player_max_speed = float(blueprint.get_attribute('speed').recommended_values[1])\n self.player_max_speed_fast = float(blueprint.get_attribute('speed').recommended_values[2])\n else:\n print(\"No recommended values for 'speed' attribute\")\n # Spawn the player.\n if self.player is not None:\n spawn_point = self.player.get_transform()\n spawn_point.location.z += 2.0\n 
spawn_point.rotation.roll = 0.0\n spawn_point.rotation.pitch = 0.0\n self.destroy()\n self.player = self.world.try_spawn_actor(blueprint, spawn_point)\n while self.player is None:\n if not self.map.get_spawn_points():\n print('There are no spawn points available in your map/town.')\n print('Please add some Vehicle Spawn Point to your UE4 scene.')\n sys.exit(1)\n spawn_points = self.map.get_spawn_points()\n spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()\n self.player = self.world.try_spawn_actor(blueprint, spawn_point)\n # Set up the sensors.\n self.collision_sensor = CollisionSensor(self.player, self.hud)\n self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud)\n self.gnss_sensor = GnssSensor(self.player)\n self.imu_sensor = IMUSensor(self.player)\n self.camera_manager = CameraManager(self.player, self.hud, self._gamma)\n self.camera_manager.transform_index = cam_pos_index\n self.camera_manager.set_sensor(cam_index, notify=False)\n actor_type = get_actor_display_name(self.player)\n self.hud.notification(actor_type)\n\n def next_weather(self, reverse=False):\n self._weather_index += -1 if reverse else 1\n self._weather_index %= len(self._weather_presets)\n preset = self._weather_presets[self._weather_index]\n self.hud.notification('Weather: %s' % preset[1])\n self.player.get_world().set_weather(preset[0])\n\n def toggle_radar(self):\n if self.radar_sensor is None:\n self.radar_sensor = RadarSensor(self.player)\n elif self.radar_sensor.sensor is not None:\n self.radar_sensor.sensor.destroy()\n self.radar_sensor = None\n\n def tick(self, clock):\n self.hud.tick(self, clock)\n\n def render(self, display):\n self.camera_manager.render(display)\n self.hud.render(display)\n\n def destroy_sensors(self):\n self.camera_manager.sensor.destroy()\n self.camera_manager.sensor = None\n self.camera_manager.index = None\n\n def destroy(self):\n if self.radar_sensor is not None:\n self.toggle_radar()\n actors = [\n self.camera_manager.sensor,\n self.collision_sensor.sensor,\n self.lane_invasion_sensor.sensor,\n self.gnss_sensor.sensor,\n self.imu_sensor.sensor,\n self.player]\n for actor in actors:\n if actor is not None:\n actor.destroy()\n\n\n# ==============================================================================\n# -- KeyboardControl -----------------------------------------------------------\n# ==============================================================================\n\n\nclass KeyboardControl(object):\n \"\"\"Class that handles keyboard input.\"\"\"\n def __init__(self, world, start_in_autopilot):\n self._autopilot_enabled = start_in_autopilot\n if isinstance(world.player, carla.Vehicle):\n self._control = carla.VehicleControl()\n self._lights = carla.VehicleLightState.NONE\n world.player.set_autopilot(self._autopilot_enabled)\n world.player.set_light_state(self._lights)\n elif isinstance(world.player, carla.Walker):\n self._control = carla.WalkerControl()\n self._autopilot_enabled = False\n self._rotation = world.player.get_transform().rotation\n else:\n raise NotImplementedError(\"Actor type not supported\")\n self._steer_cache = 0.0\n world.hud.notification(\"Press 'H' or '?' 
for help.\", seconds=4.0)\n\n def parse_events(self, client, world, clock):\n if isinstance(self._control, carla.VehicleControl):\n current_lights = self._lights\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n elif event.type == pygame.KEYUP:\n if self._is_quit_shortcut(event.key):\n return True\n elif event.key == K_BACKSPACE:\n if self._autopilot_enabled:\n world.player.set_autopilot(False)\n world.restart()\n world.player.set_autopilot(True)\n else:\n world.restart()\n elif event.key == K_F1:\n world.hud.toggle_info()\n elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):\n world.hud.help.toggle()\n elif event.key == K_TAB:\n world.camera_manager.toggle_camera()\n elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:\n world.next_weather(reverse=True)\n elif event.key == K_c:\n world.next_weather()\n elif event.key == K_g:\n world.toggle_radar()\n elif event.key == K_BACKQUOTE:\n world.camera_manager.next_sensor()\n elif event.key == K_n:\n world.camera_manager.next_sensor()\n elif event.key > K_0 and event.key <= K_9:\n world.camera_manager.set_sensor(event.key - 1 - K_0)\n elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):\n world.camera_manager.toggle_recording()\n elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):\n if (world.recording_enabled):\n client.stop_recorder()\n world.recording_enabled = False\n world.hud.notification(\"Recorder is OFF\")\n else:\n client.start_recorder(\"manual_recording.rec\")\n world.recording_enabled = True\n world.hud.notification(\"Recorder is ON\")\n elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):\n # stop recorder\n client.stop_recorder()\n world.recording_enabled = False\n # work around to fix camera at start of replaying\n current_index = world.camera_manager.index\n world.destroy_sensors()\n # disable autopilot\n self._autopilot_enabled = False\n world.player.set_autopilot(self._autopilot_enabled)\n world.hud.notification(\"Replaying file 'manual_recording.rec'\")\n # replayer\n client.replay_file(\"manual_recording.rec\", world.recording_start, 0, 0)\n world.camera_manager.set_sensor(current_index)\n elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):\n if pygame.key.get_mods() & KMOD_SHIFT:\n world.recording_start -= 10\n else:\n world.recording_start -= 1\n world.hud.notification(\"Recording start time is %d\" % (world.recording_start))\n elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):\n if pygame.key.get_mods() & KMOD_SHIFT:\n world.recording_start += 10\n else:\n world.recording_start += 1\n world.hud.notification(\"Recording start time is %d\" % (world.recording_start))\n if isinstance(self._control, carla.VehicleControl):\n if event.key == K_q:\n self._control.gear = 1 if self._control.reverse else -1\n elif event.key == K_m:\n self._control.manual_gear_shift = not self._control.manual_gear_shift\n self._control.gear = world.player.get_control().gear\n world.hud.notification('%s Transmission' %\n ('Manual' if self._control.manual_gear_shift else 'Automatic'))\n elif self._control.manual_gear_shift and event.key == K_COMMA:\n self._control.gear = max(-1, self._control.gear - 1)\n elif self._control.manual_gear_shift and event.key == K_PERIOD:\n self._control.gear = self._control.gear + 1\n elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:\n self._autopilot_enabled = not self._autopilot_enabled\n world.player.set_autopilot(self._autopilot_enabled)\n 
world.hud.notification(\n 'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))\n elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:\n current_lights ^= carla.VehicleLightState.Special1\n elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:\n current_lights ^= carla.VehicleLightState.HighBeam\n elif event.key == K_l:\n # Use 'L' key to switch between lights:\n # closed -> position -> low beam -> fog\n if not self._lights & carla.VehicleLightState.Position:\n world.hud.notification(\"Position lights\")\n current_lights |= carla.VehicleLightState.Position\n else:\n world.hud.notification(\"Low beam lights\")\n current_lights |= carla.VehicleLightState.LowBeam\n if self._lights & carla.VehicleLightState.LowBeam:\n world.hud.notification(\"Fog lights\")\n current_lights |= carla.VehicleLightState.Fog\n if self._lights & carla.VehicleLightState.Fog:\n world.hud.notification(\"Lights off\")\n current_lights ^= carla.VehicleLightState.Position\n current_lights ^= carla.VehicleLightState.LowBeam\n current_lights ^= carla.VehicleLightState.Fog\n elif event.key == K_i:\n current_lights ^= carla.VehicleLightState.Interior\n elif event.key == K_z:\n current_lights ^= carla.VehicleLightState.LeftBlinker\n elif event.key == K_x:\n current_lights ^= carla.VehicleLightState.RightBlinker\n\n if not self._autopilot_enabled:\n if isinstance(self._control, carla.VehicleControl):\n self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())\n self._control.reverse = self._control.gear < 0\n # Set automatic control-related vehicle lights\n if self._control.brake:\n current_lights |= carla.VehicleLightState.Brake\n else: # Remove the Brake flag\n current_lights &= ~carla.VehicleLightState.Brake\n if self._control.reverse:\n current_lights |= carla.VehicleLightState.Reverse\n else: # Remove the Reverse flag\n current_lights &= ~carla.VehicleLightState.Reverse\n if current_lights != self._lights: # Change the light state only if necessary\n self._lights = current_lights\n world.player.set_light_state(carla.VehicleLightState(self._lights))\n elif isinstance(self._control, carla.WalkerControl):\n self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)\n world.player.apply_control(self._control)\n\n def _parse_vehicle_keys(self, keys, milliseconds):\n if keys[K_UP] or keys[K_w]:\n self._control.throttle = min(self._control.throttle + 0.01, 1)\n else:\n self._control.throttle = 0.0\n\n if keys[K_DOWN] or keys[K_s]:\n self._control.brake = min(self._control.brake + 0.2, 1)\n else:\n self._control.brake = 0\n\n steer_increment = 5e-4 * milliseconds\n if keys[K_LEFT] or keys[K_a]:\n if self._steer_cache > 0:\n self._steer_cache = 0\n else:\n self._steer_cache -= steer_increment\n elif keys[K_RIGHT] or keys[K_d]:\n if self._steer_cache < 0:\n self._steer_cache = 0\n else:\n self._steer_cache += steer_increment\n else:\n self._steer_cache = 0.0\n self._steer_cache = min(0.7, max(-0.7, self._steer_cache))\n self._control.steer = round(self._steer_cache, 1)\n self._control.hand_brake = keys[K_SPACE]\n\n def _parse_walker_keys(self, keys, milliseconds, world):\n self._control.speed = 0.0\n if keys[K_DOWN] or keys[K_s]:\n self._control.speed = 0.0\n if keys[K_LEFT] or keys[K_a]:\n self._control.speed = .01\n self._rotation.yaw -= 0.08 * milliseconds\n if keys[K_RIGHT] or keys[K_d]:\n self._control.speed = .01\n self._rotation.yaw += 0.08 * milliseconds\n if keys[K_UP] or keys[K_w]:\n self._control.speed = world.player_max_speed_fast if pygame.key.get_mods() & KMOD_SHIFT 
else world.player_max_speed\n self._control.jump = keys[K_SPACE]\n self._rotation.yaw = round(self._rotation.yaw, 1)\n self._control.direction = self._rotation.get_forward_vector()\n\n @staticmethod\n def _is_quit_shortcut(key):\n return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)\n\n\n# ==============================================================================\n# -- HUD -----------------------------------------------------------------------\n# ==============================================================================\n\n\nclass HUD(object):\n def __init__(self, width, height):\n self.dim = (width, height)\n font = pygame.font.Font(pygame.font.get_default_font(), 20)\n font_name = 'courier' if os.name == 'nt' else 'mono'\n fonts = [x for x in pygame.font.get_fonts() if font_name in x]\n default_font = 'ubuntumono'\n mono = default_font if default_font in fonts else fonts[0]\n mono = pygame.font.match_font(mono)\n self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)\n self._notifications = FadingText(font, (width, 40), (0, height - 40))\n self.help = HelpText(pygame.font.Font(mono, 16), width, height)\n self.server_fps = 0\n self.frame = 0\n self.simulation_time = 0\n self._show_info = True\n self._info_text = []\n self._server_clock = pygame.time.Clock()\n\n def on_world_tick(self, timestamp):\n self._server_clock.tick()\n self.server_fps = self._server_clock.get_fps()\n self.frame = timestamp.frame\n self.simulation_time = timestamp.elapsed_seconds\n\n def tick(self, world, clock):\n self._notifications.tick(world, clock)\n if not self._show_info:\n return\n t = world.player.get_transform()\n v = world.player.get_velocity()\n c = world.player.get_control()\n compass = world.imu_sensor.compass\n heading = 'N' if compass > 270.5 or compass < 89.5 else ''\n heading += 'S' if 90.5 < compass < 269.5 else ''\n heading += 'E' if 0.5 < compass < 179.5 else ''\n heading += 'W' if 180.5 < compass < 359.5 else ''\n colhist = world.collision_sensor.get_collision_history()\n collision = [colhist[x + self.frame - 200] for x in range(0, 200)]\n max_col = max(1.0, max(collision))\n collision = [x / max_col for x in collision]\n vehicles = world.world.get_actors().filter('vehicle.*')\n self._info_text = [\n 'Server: % 16.0f FPS' % self.server_fps,\n 'Client: % 16.0f FPS' % clock.get_fps(),\n '',\n 'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20),\n 'Map: % 20s' % world.map.name,\n 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),\n '',\n 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),\n u'Compass:% 17.0f\\N{DEGREE SIGN} % 2s' % (compass, heading),\n 'Accelero: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.accelerometer),\n 'Gyroscop: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.gyroscope),\n 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),\n 'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),\n 'Height: % 18.0f m' % t.location.z,\n '']\n if isinstance(c, carla.VehicleControl):\n self._info_text += [\n ('Throttle:', c.throttle, 0.0, 1.0),\n ('Steer:', c.steer, -1.0, 1.0),\n ('Brake:', c.brake, 0.0, 1.0),\n ('Reverse:', c.reverse),\n ('Hand brake:', c.hand_brake),\n ('Manual:', c.manual_gear_shift),\n 'Gear: %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear)]\n elif isinstance(c, carla.WalkerControl):\n self._info_text += [\n ('Speed:', c.speed, 0.0, 5.556),\n ('Jump:', c.jump)]\n self._info_text += [\n '',\n 'Collision:',\n collision,\n 
'',\n 'Number of vehicles: % 8d' % len(vehicles)]\n if len(vehicles) > 1:\n self._info_text += ['Nearby vehicles:']\n distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2)\n vehicles = [(distance(x.get_location()), x) for x in vehicles if x.id != world.player.id]\n for d, vehicle in sorted(vehicles):\n if d > 200.0:\n break\n vehicle_type = get_actor_display_name(vehicle, truncate=22)\n self._info_text.append('% 4dm %s' % (d, vehicle_type))\n\n def toggle_info(self):\n self._show_info = not self._show_info\n\n def notification(self, text, seconds=2.0):\n self._notifications.set_text(text, seconds=seconds)\n\n def error(self, text):\n self._notifications.set_text('Error: %s' % text, (255, 0, 0))\n\n def render(self, display):\n if self._show_info:\n info_surface = pygame.Surface((220, self.dim[1]))\n info_surface.set_alpha(100)\n display.blit(info_surface, (0, 0))\n v_offset = 4\n bar_h_offset = 100\n bar_width = 106\n for item in self._info_text:\n if v_offset + 18 > self.dim[1]:\n break\n if isinstance(item, list):\n if len(item) > 1:\n points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]\n pygame.draw.lines(display, (255, 136, 0), False, points, 2)\n item = None\n v_offset += 18\n elif isinstance(item, tuple):\n if isinstance(item[1], bool):\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))\n pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)\n else:\n rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect_border, 1)\n f = (item[1] - item[2]) / (item[3] - item[2])\n if item[2] < 0.0:\n rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))\n else:\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect)\n item = item[0]\n if item: # At this point has to be a str.\n surface = self._font_mono.render(item, True, (255, 255, 255))\n display.blit(surface, (8, v_offset))\n v_offset += 18\n self._notifications.render(display)\n self.help.render(display)\n\n\n# ==============================================================================\n# -- FadingText ----------------------------------------------------------------\n# ==============================================================================\n\n\nclass FadingText(object):\n def __init__(self, font, dim, pos):\n self.font = font\n self.dim = dim\n self.pos = pos\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n\n def set_text(self, text, color=(255, 255, 255), seconds=2.0):\n text_texture = self.font.render(text, True, color)\n self.surface = pygame.Surface(self.dim)\n self.seconds_left = seconds\n self.surface.fill((0, 0, 0, 0))\n self.surface.blit(text_texture, (10, 11))\n\n def tick(self, _, clock):\n delta_seconds = 1e-3 * clock.get_time()\n self.seconds_left = max(0.0, self.seconds_left - delta_seconds)\n self.surface.set_alpha(500.0 * self.seconds_left)\n\n def render(self, display):\n display.blit(self.surface, self.pos)\n\n\n# ==============================================================================\n# -- HelpText ------------------------------------------------------------------\n# ==============================================================================\n\n\nclass HelpText(object):\n \"\"\"Helper class to handle text output using pygame\"\"\"\n def __init__(self, font, width, height):\n lines = __doc__.split('\\n')\n self.font = font\n 
self.line_space = 18\n self.dim = (780, len(lines) * self.line_space + 12)\n self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n self.surface.fill((0, 0, 0, 0))\n for n, line in enumerate(lines):\n text_texture = self.font.render(line, True, (255, 255, 255))\n self.surface.blit(text_texture, (22, n * self.line_space))\n self._render = False\n self.surface.set_alpha(220)\n\n def toggle(self):\n self._render = not self._render\n\n def render(self, display):\n if self._render:\n display.blit(self.surface, self.pos)\n\n\n# ==============================================================================\n# -- CollisionSensor -----------------------------------------------------------\n# ==============================================================================\n\n\nclass CollisionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self.history = []\n self._parent = parent_actor\n self.hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.collision')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))\n\n def get_collision_history(self):\n history = collections.defaultdict(int)\n for frame, intensity in self.history:\n history[frame] += intensity\n return history\n\n @staticmethod\n def _on_collision(weak_self, event):\n self = weak_self()\n if not self:\n return\n actor_type = get_actor_display_name(event.other_actor)\n self.hud.notification('Collision with %r' % actor_type)\n impulse = event.normal_impulse\n intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)\n self.history.append((event.frame, intensity))\n if len(self.history) > 4000:\n self.history.pop(0)\n\n\n# ==============================================================================\n# -- LaneInvasionSensor --------------------------------------------------------\n# ==============================================================================\n\n\nclass LaneInvasionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self._parent = parent_actor\n self.hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.lane_invasion')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event))\n\n @staticmethod\n def _on_invasion(weak_self, event):\n self = weak_self()\n if not self:\n return\n lane_types = set(x.type for x in event.crossed_lane_markings)\n text = ['%r' % str(x).split()[-1] for x in lane_types]\n self.hud.notification('Crossed line %s' % ' and '.join(text))\n\n\n# ==============================================================================\n# -- GnssSensor ----------------------------------------------------------------\n# ==============================================================================\n\n\nclass GnssSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n self.lat = 0.0\n self.lon = 0.0\n world = self._parent.get_world()\n bp = 
world.get_blueprint_library().find('sensor.other.gnss')\n self.sensor = world.spawn_actor(bp, carla.Transform(carla.Location(x=1.0, z=2.8)), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: GnssSensor._on_gnss_event(weak_self, event))\n\n @staticmethod\n def _on_gnss_event(weak_self, event):\n self = weak_self()\n if not self:\n return\n self.lat = event.latitude\n self.lon = event.longitude\n\n\n# ==============================================================================\n# -- IMUSensor -----------------------------------------------------------------\n# ==============================================================================\n\n\nclass IMUSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n self.accelerometer = (0.0, 0.0, 0.0)\n self.gyroscope = (0.0, 0.0, 0.0)\n self.compass = 0.0\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.imu')\n self.sensor = world.spawn_actor(\n bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(\n lambda sensor_data: IMUSensor._IMU_callback(weak_self, sensor_data))\n\n @staticmethod\n def _IMU_callback(weak_self, sensor_data):\n self = weak_self()\n if not self:\n return\n limits = (-99.9, 99.9)\n self.accelerometer = (\n max(limits[0], min(limits[1], sensor_data.accelerometer.x)),\n max(limits[0], min(limits[1], sensor_data.accelerometer.y)),\n max(limits[0], min(limits[1], sensor_data.accelerometer.z)))\n self.gyroscope = (\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.x))),\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.y))),\n max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.z))))\n self.compass = math.degrees(sensor_data.compass)\n\n\n# ==============================================================================\n# -- RadarSensor ---------------------------------------------------------------\n# ==============================================================================\n\n\nclass RadarSensor(object):\n def __init__(self, parent_actor):\n self.sensor = None\n self._parent = parent_actor\n self.velocity_range = 7.5 # m/s\n world = self._parent.get_world()\n self.debug = world.debug\n bp = world.get_blueprint_library().find('sensor.other.radar')\n bp.set_attribute('horizontal_fov', str(35))\n bp.set_attribute('vertical_fov', str(20))\n self.sensor = world.spawn_actor(\n bp,\n carla.Transform(\n carla.Location(x=2.8, z=1.0),\n carla.Rotation(pitch=5)),\n attach_to=self._parent)\n # We need a weak reference to self to avoid circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(\n lambda radar_data: RadarSensor._Radar_callback(weak_self, radar_data))\n\n @staticmethod\n def _Radar_callback(weak_self, radar_data):\n self = weak_self()\n if not self:\n return\n # To get a numpy [[vel, altitude, azimuth, depth],...[,,,]]:\n # points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))\n # points = np.reshape(points, (len(radar_data), 4))\n\n current_rot = radar_data.transform.rotation\n for detect in radar_data:\n azi = math.degrees(detect.azimuth)\n alt = math.degrees(detect.altitude)\n # The 0.25 adjusts a bit the distance so the dots can\n # be properly seen\n fw_vec = carla.Vector3D(x=detect.depth - 0.25)\n 
carla.Transform(\n carla.Location(),\n carla.Rotation(\n pitch=current_rot.pitch + alt,\n yaw=current_rot.yaw + azi,\n roll=current_rot.roll)).transform(fw_vec)\n\n def clamp(min_v, max_v, value):\n return max(min_v, min(value, max_v))\n\n norm_velocity = detect.velocity / self.velocity_range # range [-1, 1]\n r = int(clamp(0.0, 1.0, 1.0 - norm_velocity) * 255.0)\n g = int(clamp(0.0, 1.0, 1.0 - abs(norm_velocity)) * 255.0)\n b = int(abs(clamp(- 1.0, 0.0, - 1.0 - norm_velocity)) * 255.0)\n self.debug.draw_point(\n radar_data.transform.location + fw_vec,\n size=0.075,\n life_time=0.06,\n persistent_lines=False,\n color=carla.Color(r, g, b))\n\n# ==============================================================================\n# -- CameraManager -------------------------------------------------------------\n# ==============================================================================\n\n\nclass CameraManager(object):\n def __init__(self, parent_actor, hud, gamma_correction):\n self.sensor = None\n self.surface = None\n self._parent = parent_actor\n self.hud = hud\n self.recording = False\n bound_y = 0.5 + self._parent.bounding_box.extent.y\n Attachment = carla.AttachmentType\n self._camera_transforms = [\n (carla.Transform(carla.Location(x=-5.5, z=2.5), carla.Rotation(pitch=8.0)), Attachment.SpringArm),\n (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid),\n (carla.Transform(carla.Location(x=5.5, y=1.5, z=1.5)), Attachment.SpringArm),\n (carla.Transform(carla.Location(x=-8.0, z=6.0), carla.Rotation(pitch=6.0)), Attachment.SpringArm),\n (carla.Transform(carla.Location(x=-1, y=-bound_y, z=0.5)), Attachment.Rigid)]\n self.transform_index = 1\n self.sensors = [\n ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],\n ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)', {}],\n ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)', {}],\n ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)', {}],\n ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)', {}],\n ['sensor.camera.semantic_segmentation', cc.CityScapesPalette,\n 'Camera Semantic Segmentation (CityScapes Palette)', {}],\n ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)', {}],\n ['sensor.camera.dvs', cc.Raw, 'Dynamic Vision Sensor', {}],\n ['sensor.camera.rgb', cc.Raw, 'Camera RGB Distorted',\n {'lens_circle_multiplier': '3.0',\n 'lens_circle_falloff': '3.0',\n 'chromatic_aberration_intensity': '0.5',\n 'chromatic_aberration_offset': '0'}]]\n world = self._parent.get_world()\n bp_library = world.get_blueprint_library()\n for item in self.sensors:\n bp = bp_library.find(item[0])\n if item[0].startswith('sensor.camera'):\n bp.set_attribute('image_size_x', str(hud.dim[0]))\n bp.set_attribute('image_size_y', str(hud.dim[1]))\n if bp.has_attribute('gamma'):\n bp.set_attribute('gamma', str(gamma_correction))\n for attr_name, attr_value in item[3].items():\n bp.set_attribute(attr_name, attr_value)\n elif item[0].startswith('sensor.lidar'):\n bp.set_attribute('range', '50')\n item.append(bp)\n self.index = None\n\n def toggle_camera(self):\n self.transform_index = (self.transform_index + 1) % len(self._camera_transforms)\n self.set_sensor(self.index, notify=False, force_respawn=True)\n\n def set_sensor(self, index, notify=True, force_respawn=False):\n index = index % len(self.sensors)\n needs_respawn = True if self.index is None else \\\n (force_respawn or (self.sensors[index][2] != self.sensors[self.index][2]))\n if needs_respawn:\n if self.sensor is not None:\n 
self.sensor.destroy()\n self.surface = None\n self.sensor = self._parent.get_world().spawn_actor(\n self.sensors[index][-1],\n self._camera_transforms[self.transform_index][0],\n attach_to=self._parent,\n attachment_type=self._camera_transforms[self.transform_index][1])\n # We need to pass the lambda a weak reference to self to avoid\n # circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))\n if notify:\n self.hud.notification(self.sensors[index][2])\n self.index = index\n\n def next_sensor(self):\n self.set_sensor(self.index + 1)\n\n def toggle_recording(self):\n self.recording = not self.recording\n self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))\n\n def render(self, display):\n if self.surface is not None:\n display.blit(self.surface, (0, 0))\n\n @staticmethod\n def _parse_image(weak_self, image):\n self = weak_self()\n if not self:\n return\n if self.sensors[self.index][0].startswith('sensor.lidar'):\n points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))\n points = np.reshape(points, (int(points.shape[0] / 3), 3))\n lidar_data = np.array(points[:, :2])\n lidar_data *= min(self.hud.dim) / 100.0\n lidar_data += (0.5 * self.hud.dim[0], 0.5 * self.hud.dim[1])\n lidar_data = np.fabs(lidar_data) # pylint: disable=E1111\n lidar_data = lidar_data.astype(np.int32)\n lidar_data = np.reshape(lidar_data, (-1, 2))\n lidar_img_size = (self.hud.dim[0], self.hud.dim[1], 3)\n lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)\n lidar_img[tuple(lidar_data.T)] = (255, 255, 255)\n self.surface = pygame.surfarray.make_surface(lidar_img)\n elif self.sensors[self.index][0].startswith('sensor.camera.dvs'):\n # Example of converting the raw_data from a carla.DVSEventArray\n # sensor into a NumPy array and using it as an image\n dvs_events = np.frombuffer(image.raw_data, dtype=np.dtype([\n ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.bool)]))\n dvs_img = np.zeros((image.height, image.width, 3), dtype=np.uint8)\n # Blue is positive, red is negative\n dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255\n self.surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))\n else:\n image.convert(self.sensors[self.index][1])\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if self.recording:\n image.save_to_disk('_out/%08d' % image.frame)\n\n\n# ==============================================================================\n# -- game_loop() ---------------------------------------------------------------\n# ==============================================================================\n\n\ndef game_loop(args):\n pygame.init()\n pygame.font.init()\n world = None\n\n try:\n client = carla.Client(args.host, args.port)\n client.set_timeout(2.0)\n\n display = pygame.display.set_mode(\n (args.width, args.height),\n pygame.HWSURFACE | pygame.DOUBLEBUF)\n\n hud = HUD(args.width, args.height)\n world = World(client.get_world(), hud, args)\n controller = KeyboardControl(world, args.autopilot)\n\n clock = pygame.time.Clock()\n while True:\n clock.tick_busy_loop(60)\n if controller.parse_events(client, world, clock):\n return\n world.tick(clock)\n world.render(display)\n pygame.display.flip()\n\n finally:\n\n if (world and world.recording_enabled):\n 
client.stop_recorder()\n\n if world is not None:\n world.destroy()\n\n pygame.quit()\n\n\n# ==============================================================================\n# -- main() --------------------------------------------------------------------\n# ==============================================================================\n\n\ndef main():\n argparser = argparse.ArgumentParser(\n description='CARLA Manual Control Client')\n argparser.add_argument(\n '-v', '--verbose',\n action='store_true',\n dest='debug',\n help='print debug information')\n argparser.add_argument(\n '--host',\n metavar='H',\n default='127.0.0.1',\n help='IP of the host server (default: 127.0.0.1)')\n argparser.add_argument(\n '-p', '--port',\n metavar='P',\n default=2000,\n type=int,\n help='TCP port to listen to (default: 2000)')\n argparser.add_argument(\n '-a', '--autopilot',\n action='store_true',\n help='enable autopilot')\n argparser.add_argument(\n '--res',\n metavar='WIDTHxHEIGHT',\n default='1280x720',\n help='window resolution (default: 1280x720)')\n argparser.add_argument(\n '--filter',\n metavar='PATTERN',\n default='vehicle.*',\n help='actor filter (default: \"vehicle.*\")')\n argparser.add_argument(\n '--rolename',\n metavar='NAME',\n default='hero',\n help='actor role name (default: \"hero\")')\n argparser.add_argument(\n '--gamma',\n default=2.2,\n type=float,\n help='Gamma correction of the camera (default: 2.2)')\n args = argparser.parse_args()\n\n args.width, args.height = [int(x) for x in args.res.split('x')]\n\n log_level = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)\n\n logging.info('listening to server %s:%s', args.host, args.port)\n\n print(__doc__)\n\n try:\n\n game_loop(args)\n\n except KeyboardInterrupt:\n print('\\nCancelled by user. Bye!')\n\n\nif __name__ == '__main__':\n\n main()\n" ]
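Each sensor wrapper in manual_control.py above registers its carla callback through a weak reference, so the lambda stored inside the sensor does not keep the Python wrapper alive through a circular reference. A self-contained sketch of that pattern with a stand-in sensor class (FakeSensor is hypothetical and not part of the CARLA API):

import weakref

class FakeSensor:
    """Stand-in for a carla sensor: stores the callback and lets us fire it manually."""
    def __init__(self):
        self._callback = None
    def listen(self, callback):
        self._callback = callback
    def fire(self, event):
        self._callback(event)

class CollisionWrapper:
    def __init__(self, sensor):
        self.sensor = sensor
        self.history = []
        # Pass a weak reference into the lambda, mirroring the CARLA sensor classes above.
        weak_self = weakref.ref(self)
        sensor.listen(lambda event: CollisionWrapper._on_event(weak_self, event))

    @staticmethod
    def _on_event(weak_self, event):
        self = weak_self()
        if not self:          # wrapper already garbage-collected; nothing to update
            return
        self.history.append(event)

sensor = FakeSensor()
wrapper = CollisionWrapper(sensor)
sensor.fire("bump")
print(wrapper.history)        # ['bump']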
[ [ "numpy.reshape", "numpy.dtype", "numpy.array", "numpy.zeros", "numpy.fabs" ] ]
phukeo/SCRAMBLE
[ "35d222edb6533e24d09f48a82175281af32eb91f" ]
[ "scrambleFUN.py" ]
[ "import os\r\nimport numpy as np\r\nimport matplotlib.pyplot as pp\r\nimport pandas as pd\r\n\r\n#########################\r\n## INTIALISE VARIABLES ##\r\n#########################\r\n\r\nnewDesk=[]\r\nselectedList=[]\r\nyPlotlabel=\"\"\r\nflow=[\"red\", \"orange\",\"brown\",\"tan\", \"lime\", \"purple\", \"teal\", \"black\", \"blue\", \"grey\", \"pink\", \"violet\", \"goldenrod\",\"darkkhaki\",\"peru\", \"saddlebrown\"]\r\nblues=[\"blue\",\"turquoise\",\"lime\", \"darkgreen\",\"midnightblue\", \"slateblue\", \"dodgerblue\", \"mediumblue\", \"seagreen\",\"yellowgreen\",\"olivedrab\",\"lightseagreen\"]\r\ngreens=[\"olive\",\"crimson\",\"black\", \"blue\", \"maroon\", \"lightcoral\", \"chocolate\", \"lightsalmon\", \"darkolivegreen\", \"rosybrown\"]\r\nreds=flow+blues+greens+flow+blues+greens\r\nBODStats=pd.DataFrame()\r\n\r\n######################\r\n## DEFINE FUNCTIONS ##\r\n######################\r\n\r\ndef importData(directory):\r\n \r\n os.chdir(directory)\r\n folderList=os.listdir()\r\n idvgData=pd.DataFrame() # Initialises a blank dataframe to be appended to \r\n newDesk=[] # Initialise a blank list for the data to be selected from\r\n counter=0\r\n\r\n for folderName in folderList:# Loop over the functionalisation folders\r\n os.chdir(directory)\r\n folderList=os.listdir( )# Now list the FOLDERS inside the top directory\r\n os.chdir(directory+\"/\"+folderName) # Change directory to the ith folderName \r\n fileList=os.listdir() # List the FILES in the folderName FOLDER\r\n \r\n for file in fileList:# Loop over the files in the fileList and import them to the dataframe with a new snazzier name \r\n fName = directory+\"/\"+folderName+\"/\"+file\r\n df=pd.read_csv(fName, usecols=[1,2], skiprows=248)\r\n\r\n global device\r\n newTitle,device = newNameFinal(folderName,file)\r\n df.columns=pd.MultiIndex.from_product([[newTitle],df.columns]) # Introduce multiindex naming of columns\r\n idvgData=pd.concat([idvgData,df],axis=1)\r\n newDesk.append(newTitle)\r\n \r\n global copied_original\r\n copied_original=idvgData.copy()\r\n copied_original.name=device\r\n\r\n return copied_original,device,newDesk\r\n\r\ndef newNameFinal(folderName1, originalName):\r\n # Takes a file name and shortens it based on the position of the \"_\" and then concatenates with the folder name. 
\r\n\r\n displayText=originalName[0:originalName.find(\"_\")]\r\n outputName=folderName1+\"_\"+displayText\r\n\r\n return outputName, displayText[0:2]\r\n\r\ndef importBOD(filename):\r\n # Imports data from a .BOD file (a file which has been previosuly exported from SCRAMBLE)\r\n \r\n BODdf=pd.read_csv(filename, header=[0,1])\r\n \r\n global copied_original\r\n copied_original=BODdf.copy()\r\n \r\n # Produce a list of the data\r\n niceCoffee=[]\r\n for i, x in enumerate(BODdf.columns.get_level_values(0)): \r\n if i%2>0: # Select every other name as they are repeated\r\n niceCoffee.append(x)\r\n\r\n return copied_original,niceCoffee\r\n\r\ndef statsTable(selection):\r\n \r\n bigData=copied_original.copy() # Always work from a copy of the original data\r\n\r\n statsInput=bigData.loc[:,(selection)] # Filter based on name of data\r\n sVg = statsInput.loc[:,[statsInput.columns[0]]] # Select the Vbg \r\n sDrain = statsInput.loc[:,[statsInput.columns[1]]] # Select the Ids\r\n\r\n statsFrame=pd.DataFrame() #Initialise the dataframe for this loop\r\n\r\n ## FORWARD SWEEP STATS ##\r\n #Slice the data and select the forward sweep\r\n fVg=sVg.iloc[0:(int(statsInput.shape[0]/2))] \r\n fDrain=sDrain.iloc[0:(int(statsInput.shape[0]/2))] \r\n\r\n #DP Current - fDPI\r\n fMinI=fDrain.describe().loc[\"min\"]\r\n statsFrame=pd.concat([statsFrame,fMinI],ignore_index=True)\r\n\r\n #DP Voltage - fDPV\r\n fMinVIndex=abs(fDrain-fMinI).idxmin()\r\n fMinV1=fVg.iloc[fMinVIndex].values[0][0]\r\n fMinV=pd.Series(fMinV1)\r\n statsFrame=pd.concat([statsFrame,fMinV], ignore_index=True)\r\n\r\n #DP Voltage Gradient - fDPMaxgrad and fDPMaxgradV\r\n fDPIseries=fDrain[statsInput.columns[1]].values\r\n fDPVseries=fVg[statsInput.columns[0]].values\r\n fDPIgrad1=np.gradient(fDPIseries) \r\n fDPIgradMax1=max(abs(fDPIgrad1))\r\n indexGradMax=np.argmax(abs(fDPIgrad1))\r\n fDPVgradMax1=fDPVseries[indexGradMax]\r\n fDPIgradMaxI1=fDPIseries[indexGradMax]\r\n\r\n fDPIgradMax=pd.Series(fDPIgradMax1)\r\n fDPVgradMax=pd.Series(fDPVgradMax1)\r\n fDPIgradMaxI=pd.Series(fDPIgradMaxI1)\r\n\r\n statsFrame=pd.concat([statsFrame,fDPIgradMax], ignore_index=True)\r\n statsFrame=pd.concat([statsFrame,fDPVgradMax], ignore_index=True)\r\n statsFrame=pd.concat([statsFrame,fDPIgradMaxI], ignore_index=True)\r\n \r\n #Current value at 0 BackGate - fI0Vg\r\n fI0Vg1=fDrain.iloc[int(((fDrain.shape[0])-1)/2)].values[0] # Halfway point\r\n fI0Vg=pd.Series(fI0Vg1)\r\n statsFrame=pd.concat([statsFrame,fI0Vg], ignore_index=True)\r\n \r\n ## REVERSE SWEEP STATS ##\r\n #Slice the data and select the reverse sweep\r\n rVg=sVg.iloc[(int(statsInput.shape[0]/2)):]\r\n rDrain=sDrain.iloc[(int(statsInput.shape[0]/2)):] \r\n\r\n #DP Current - rDPI\r\n rMinI=rDrain.describe().loc[\"min\"]\r\n statsFrame=pd.concat([statsFrame,rMinI],ignore_index=True)\r\n\r\n #DP Voltage - rDPV\r\n rMinVIndex=abs(rDrain-rMinI).idxmin()\r\n rMinV1=sVg.iloc[rMinVIndex].values[0][0]\r\n rMinV=pd.Series(rMinV1)\r\n statsFrame=pd.concat([statsFrame,rMinV], ignore_index=True)\r\n\r\n #DP Voltage Gradient - rDPMaxgrad and rDPMaxgradV\r\n rDPIseries=rDrain[statsInput.columns[1]].values\r\n rDPVseries=rVg[statsInput.columns[0]].values\r\n rDPIgrad1=np.gradient(rDPIseries) \r\n rDPIgradMax1=max(abs(rDPIgrad1))\r\n indexGradMax=np.argmax(abs(rDPIgrad1))\r\n rDPVgradMax1=rDPVseries[indexGradMax]\r\n rDPIgradMaxI1=rDPIseries[indexGradMax]\r\n\r\n rDPIgradMax=pd.Series(rDPIgradMax1)\r\n rDPVgradMax=pd.Series(rDPVgradMax1)\r\n rDPIgradMaxI=pd.Series(rDPIgradMaxI1)\r\n\r\n 
statsFrame=pd.concat([statsFrame,rDPIgradMax], ignore_index=True)\r\n statsFrame=pd.concat([statsFrame,rDPVgradMax], ignore_index=True)\r\n statsFrame=pd.concat([statsFrame,rDPIgradMaxI], ignore_index=True)\r\n\r\n #Current value at 0 BackGate - fI0Vg\r\n rI0Vg1=rDrain.iloc[int(((rDrain.shape[0])-1)/2)].values[0]\r\n rI0Vg=pd.Series(rI0Vg1)\r\n statsFrame=pd.concat([statsFrame,rI0Vg], ignore_index=True)\r\n \r\n ## CONSTRUCT THE PARAMETER TABLE ##\r\n\r\n insides = {'Column 1' : [1,2,3,4,5,6,30,40,50,60,70,80],\r\n 'Index Title' : [\"fDPI\",\"fDPV\",\"fMaxgrad\",\"fMaxgradV\", \"fMaxgradI\", \"fI0Vg\",\r\n \"rDPI\",\"rDPV\",\"rMaxgrad\",\"rMaxgradV\",\"rMaxgradI\", \"rI0Vg\"]}\r\n blankStats = pd.DataFrame(insides)\r\n del blankStats[\"Column 1\"]\r\n blankStats.index.name = \"BOD_Params\"\r\n newFrame=pd.concat([blankStats,statsFrame], axis=1) #Concatenate the initial df with data from statsFrame\r\n newFrame.index = newFrame[\"Index Title\"]\r\n del newFrame[\"Index Title\"]\r\n newFrame.columns=[selection]\r\n newFrame.index.name=\"BOD_Params\"\r\n \r\n return newFrame #Output from StatsTable\r\n\r\ndef mobility(selection,Vds,L,W,oxideThick,oxideDielectric):\r\n \r\n bigData=copied_original.copy() # Always work from a copy of the original data\r\n mobilitySeries=pd.Series([]) # Convert dataframe to series for ease of maniupulation\r\n mobilityFrame=pd.DataFrame() #Initialise the dataframe for this sweep\r\n \r\n mobilityInput=bigData.loc[:,(selection)] # Filter based on name of data\r\n mVg = mobilityInput.loc[:,[mobilityInput.columns[0]]] # Select the Vg\r\n mDrain = mobilityInput.loc[:,[mobilityInput.columns[1]]] # Select the Ids \r\n mDrainSeries=mDrain[mobilityInput.columns[1]].values \r\n mVgSeries=mVg[mobilityInput.columns[0]].values\r\n mGradient=np.gradient(mDrainSeries) # Use the gradient function on the Ids data\r\n \r\n L=L\r\n W=W\r\n Cg=((8.854*(10**-12))*(oxideDielectric))/(oxideThick) # Calculate Cg from user parameters \r\n \r\n # Equation below calculates the mobility\r\n mobilitySeries=abs((mGradient*L)/(W*Vds*Cg))*100*100 # Multiplication of *100*100 used convert end result into units of cm^2\r\n \r\n # Convert series into a dataframe to ease concatenation and plotting\r\n mobilityFrame=pd.DataFrame(data=mobilitySeries, index=mVgSeries)\r\n mobilityFrame.index=range(0,mobilityFrame.shape[0],1)\r\n mobilityFrame=pd.concat([mVg,mobilityFrame], axis=1,ignore_index=False)\r\n mobilityFrame.columns = [mobilityInput.columns[0],\"Mobilities\"]\r\n mobilityFrame.columns=pd.MultiIndex.from_product([[selection],mobilityFrame.columns])\r\n\r\n ## FORWARD & REVERSE SWEEP STATS ##\r\n \r\n statsInput=bigData.loc[:,(selection)] \r\n sVg = statsInput.loc[:,[statsInput.columns[0]]] #statsInput.columns[0]\r\n sDrain = statsInput.loc[:,[statsInput.columns[1]]]\r\n fVg=sVg.iloc[0:(int(statsInput.shape[0]/2))]\r\n fDrain=sDrain.iloc[0:(int(statsInput.shape[0]/2))] \r\n fMinI=fDrain.describe().loc[\"min\"]\r\n fMinVIndex=abs(fDrain-fMinI).idxmin()\r\n rVg=sVg.iloc[(int(statsInput.shape[0]/2)):]\r\n rDrain=sDrain.iloc[(int(statsInput.shape[0]/2)):] \r\n rMinI=rDrain.describe().loc[\"min\"]\r\n rMinVIndex=abs(rDrain-rMinI).idxmin()\r\n\r\n fPoint=fMinVIndex.values[0]\r\n rPoint=rMinVIndex.values[0]\r\n \r\n return (fPoint, rPoint, mobilitySeries, mobilityFrame)\r\n \r\ndef processData(selectedList,Vds,deviceL,deviceW,oxideThick,oxideDielectric, mouse):\r\n \r\n ## INITIALISE VARIABLES BELOW ##\r\n BODStats=pd.DataFrame()\r\n BODMobility=pd.DataFrame()\r\n BODMobListFwd=[]\r\n 
BODMobListRev=[]\r\n L=deviceL\r\n W=deviceW\r\n labelSize=\"xx-small\"\r\n fontSize=\"x-small\"\r\n textBox=dict(boxstyle='round', facecolor='wheat')\r\n pp.close(\"all\")\r\n\r\n\r\n ## INITIALISE SWEEP VISUALISATION PLOT ##\r\n if len (selectedList)>0: \r\n fig,ax1=pp.subplots(1,1) #NewLine\r\n ax1.set_xlabel(\"$V_{bg}$ (V)\", fontsize=fontSize)\r\n\r\n for index, selection in enumerate (selectedList): \r\n # Filter the data based on widget inputs\r\n bigData=copied_original.copy()\r\n smallData1 = bigData.loc[:,(selection)] # Filter based on name of data\r\n vBg=smallData1.iloc[:,0] # Select the Vbg\r\n iDrain=smallData1.iloc[:,1] # Select the Ids\r\n \r\n # Decide if the user wants to plot as Current or Resistance\r\n if mouse == 0: # Plot as Current\r\n yPlotlabel=\"$I_{sd}$ (A)\"\r\n yPlotValue=iDrain\r\n ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\r\n elif mouse == 1: # Plot as Resistance\r\n yPlotlabel=\"Resistance ($\\Omega$)\"\r\n yPlotValue= (Vds)/iDrain # Calculate the Resistance\r\n\r\n if index==0: # Initialise a secondary axis to plot the Sheet Resistance \r\n ax2 = ax1.twinx()\r\n ax2.tick_params(axis=\"y\", labelsize=labelSize)\r\n\r\n # Finalise the axis parameters\r\n ax1.set_ylabel(yPlotlabel, fontsize=fontSize)\r\n ax1.tick_params(axis=\"both\", labelsize=labelSize)\r\n\r\n # Plot the data \r\n color=reds[index]\r\n if mouse == 1:\r\n ax2.plot(vBg,yPlotValue*(W/L), color=color, linewidth=0.5, marker = \"o\", markersize=1)\r\n ax2.set_ylabel('Sheet Resistance ($\\Omega$/Sqaure)', color=\"blue\",fontsize=fontSize)\r\n ax2.tick_params(axis='y', labelcolor=\"blue\")\r\n ax1.plot(vBg, yPlotValue, color=color, label=selection, linewidth=0.5, marker = \"o\", markersize=1)\r\n ax1.legend(fancybox=True,fontsize=\"medium\")\r\n\r\n \r\n ## CONSTRUCT DATAFRAMES FOR MOBILITIES AND PARAMETER DETAILS PLOTS ##\r\n # Call the Stats Table Function Here to Build the BOD_Parameters Dataframe\r\n newFrame=statsTable(selection)\r\n frameToAdd=newFrame\r\n BODStats=pd.concat([BODStats,frameToAdd], axis=1)\r\n \r\n # Call the Mobility Function Here to Build the BOD_Mobilities Dataframe\r\n (FWD,REV,mS,mF)=mobility(selection,Vds,L,W,oxideThick,oxideDielectric)\r\n BODMobility=pd.concat([BODMobility,mF], axis=1)\r\n BODMobListFwd.append(FWD)\r\n BODMobListRev.append(REV)\r\n \r\n ## COMPLETE PLOTTING MOBILITIES AND PARAMETER DETAILS BELOW ##\r\n if len(selectedList)>0:\r\n # Rename the stages to shorten them \r\n shortNames=[]\r\n fourCharacterNames=[]\r\n for items in selectedList:\r\n if items.find(\"_\")==1 or items.find(\"_\")==2: # Then the user has numbered folders as per SCRAMBLES instructions\r\n firstOccurence=items.find(\"_\")\r\n secondOccurence=items.find(\"_\",firstOccurence+1)\r\n if secondOccurence-firstOccurence<5: # Then the foldername is less than 4 characters long\r\n shortNames.append(items[firstOccurence+1:secondOccurence])\r\n fourCharacterNames.append(items[firstOccurence+1:secondOccurence])\r\n else:\r\n shortNames.append(items[firstOccurence+1:])\r\n fourCharacterNames.append(items[firstOccurence+1:firstOccurence+5]) \r\n else:\r\n shortNames.append(items[:4])\r\n fourCharacterNames.append(items[0:4])\r\n \r\n # Collect Data from BOD_Parameters table \r\n fDPI=list(BODStats.iloc[0])\r\n fDPV=list(BODStats.iloc[1])\r\n fDPMaxgrad=list(BODStats.iloc[2])\r\n fDPMaxgradV=list(BODStats.iloc[3])\r\n fDPMaxgradI=list(BODStats.iloc[4])\r\n fI0Vg=list(BODStats.iloc[5])\r\n rDPI=list(BODStats.iloc[6])\r\n rDPV=list(BODStats.iloc[7])\r\n 
rDPMaxgrad=list(BODStats.iloc[8])\r\n rDPMaxgradV=list(BODStats.iloc[9])\r\n rDPMaxgradI=list(BODStats.iloc[10])\r\n rI0Vg=list(BODStats.iloc[11])\r\n\r\n # Initialise Plots\r\n figStats,axStats=pp.subplots(3,2) #RightFrame - Parameter Details\r\n figStatsL,axStatsL=pp.subplots(1,3) #MiddleBFrame - Mobilities\r\n \r\n ## PLOT MOBILITIES ##\r\n for selection, color, f, re in zip(selectedList, reds, BODMobListFwd, BODMobListRev):\r\n filteredMob=BODMobility.loc[:,[selection]]\r\n mobilityPlot = filteredMob.iloc[:,1].values\r\n vPlot = filteredMob.iloc[:,0].values\r\n axStatsL[0].plot(vPlot[:f],mobilityPlot[:f], color=color, marker=\"|\", linewidth=0) \r\n axStatsL[0].plot(vPlot[f:re],mobilityPlot[f:re], color=color, marker=\"_\",linewidth=0)\r\n axStatsL[0].plot(vPlot[re:],mobilityPlot[re:], color=color, marker=\"|\",linewidth=0)\r\n holes=np.concatenate([mobilityPlot[:f],mobilityPlot[re:]])\r\n axStatsL[1].hist(holes,bins=10, color=color, alpha=0.5, rwidth=0.6)\r\n electrons=mobilityPlot[f:re]\r\n axStatsL[2].hist(electrons,bins=10, color=color, alpha=0.5, rwidth=0.6)\r\n \r\n # Finalise the axes parameters for the Sweep Visualisation Plot\r\n axStatsL[0].set_xlabel(\"$V_{bg}$ (V)\", fontsize=fontSize)\r\n axStatsL[0].set_ylabel(\"$\\mu$ ($cm^2 V^{-1} s^{-1}$)\", fontsize=fontSize)\r\n axStatsL[0].tick_params(axis=\"both\", labelsize=labelSize)\r\n axStatsL[1].set_xlabel(\"Hole $\\mu$ ($cm^2 V^{-1} s^{-1}$)\", fontsize=fontSize)\r\n axStatsL[1].set_ylabel(\"Frequency\", fontsize=fontSize)\r\n axStatsL[1].tick_params(axis=\"both\", labelsize=labelSize)\r\n axStatsL[2].set_xlabel(\"Electron $\\mu$ ($cm^2 V^{-1} s^{-1}$)\",fontsize=fontSize)\r\n axStatsL[2].set_ylabel(\"Frequency\", fontsize=fontSize)\r\n axStatsL[2].tick_params(axis=\"both\", labelsize=labelSize)\r\n \r\n \r\n ## PLOT PARAMETER DETAILS ##\r\n for q,w,e,r, k,l, x, y, i,j, color, selection in zip(fDPMaxgradV, fDPMaxgradI, rDPMaxgradV, rDPMaxgradI,fI0Vg, rI0Vg,fDPV, fDPI, rDPV, rDPI,reds, shortNames):\r\n \r\n # Below if/elif decides plotting behaviour for current or resistance visulaisation and also\r\n # overlays the Dirac points and Max Transconductance to the Sweep Visualisation Plot \r\n if mouse == 0:\r\n ax1.plot(x,y,marker=\">\", color=color, markersize=10)\r\n ax1.plot(i,j,marker=\"<\", color=color, markersize=10)\r\n ax1.plot(q,w,marker=\">\", color=color, markerfacecolor=\"none\", markersize=10)\r\n ax1.plot(e,r,marker=\"<\", color=color, markerfacecolor=\"none\", markersize=10)\r\n\r\n axStats[0,0].scatter(x,y,marker=\">\", color=color, label=selection)\r\n axStats[0,0].scatter(i,j,marker=\"<\", color=color)\r\n\r\n axStats[0,1].scatter(selection,k,marker=\">\",color=color)\r\n axStats[0,1].scatter(selection,l,marker=\"<\",color=color)\r\n axStats[0,1].annotate(**defineInsides(k,l,selection,\"vE\")[1],bbox=textBox)\r\n axStats[0,1].vlines(**defineInsides(k,l,selection,\"vE\")[0])\r\n\r\n axStats[2,0].scatter(selection,y,marker=\">\", color=color)\r\n axStats[2,0].scatter(selection,j,marker=\"<\", color=color)\r\n axStats[2,0].annotate(**defineInsides(y,j,selection,\"vE\")[1],bbox=textBox)\r\n axStats[2,0].vlines(**(defineInsides(y,j,selection,\"vE\")[0]))\r\n \r\n axStats[2,1].scatter(selection,w,color=color,marker=\"$\\u25BB$\")\r\n axStats[2,1].scatter(selection,r,color=color,marker=\"$\\u25C5$\")\r\n axStats[2,1].annotate(**defineInsides(w,r,selection,\"vE\")[1],bbox=textBox)\r\n axStats[2,1].vlines(**defineInsides(w,r,selection,\"vE\")[0])\r\n\r\n elif mouse==1:\r\n ax1.plot(x,Vds/y,marker=\">\", color=color, 
markersize=10)\r\n ax1.plot(i,Vds/j,marker=\"<\", color=color, markersize=10)\r\n ax1.plot(q,Vds/w,marker=\">\", color=color, markerfacecolor=\"none\", markersize=10)\r\n ax1.plot(e,Vds/r,marker=\"<\", color=color, markerfacecolor=\"none\", markersize=10) \r\n\r\n axStats[0,0].scatter(x,Vds/y,marker=\">\", color=color, label=selection)\r\n axStats[0,0].scatter(i,Vds/j,marker=\"<\", color=color)\r\n\r\n axStats[0,1].scatter(selection,Vds/k,marker=\">\",color=color)\r\n axStats[0,1].scatter(selection,Vds/l,marker=\"<\",color=color)\r\n axStats[0,1].annotate(**defineInsides(Vds/k,Vds/l,selection,\"vP\")[1],bbox=textBox)\r\n axStats[0,1].vlines(**defineInsides(Vds/k,Vds/l,selection,\"vP\")[0])\r\n\r\n axStats[2,0].scatter(selection,Vds/y,marker=\">\", color=color)\r\n axStats[2,0].scatter(selection,Vds/j,marker=\"<\", color=color)\r\n axStats[2,0].annotate(**defineInsides(Vds/y,Vds/j,selection,\"vP\")[1],bbox=textBox)\r\n axStats[2,0].vlines(**(defineInsides(Vds/y,Vds/j,selection,\"vP\")[0]))\r\n \r\n axStats[2,1].scatter(selection,Vds/w,color=color,marker=\"$\\u25BB$\")\r\n axStats[2,1].scatter(selection,Vds/r,color=color,marker=\"$\\u25C5$\")\r\n axStats[2,1].annotate(**defineInsides(Vds/w,Vds/r,selection,\"vP\")[1],bbox=textBox)\r\n axStats[2,1].vlines(**defineInsides(Vds/w,Vds/r,selection,\"vP\")[0])\r\n\r\n axStats[1,1].scatter(q,selection,color=color,marker=\"$\\u25BB$\")\r\n axStats[1,1].scatter(e,selection,color=color,marker=\"$\\u25C5$\")\r\n axStats[1,1].annotate(**defineInsides(q,e,selection,\"hE\")[1],bbox=textBox)\r\n axStats[1,1].hlines(**defineInsides(q,e,selection,\"hE\")[0])\r\n\r\n axStats[1,0].scatter(x,selection,marker=\">\", color=color)\r\n axStats[1,0].scatter(i,selection,marker=\"<\", color=color)\r\n axStats[1,0].annotate(**defineInsides(x,i,selection,\"hE\")[1],bbox=textBox)\r\n axStats[1,0].hlines(**(defineInsides(x,i,selection,\"hE\")[0]))\r\n\r\n # Below if/elif decides axes behaviour depending on current or resistance visulaisation\r\n if mouse == 0:\r\n axStats[0,0].set_ylabel(\"$I_{sd}$ (A)\",fontsize=fontSize)\r\n axStats[0,0].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\r\n axStats[0,0].set_ylim([0.96*min(min(fDPI),min(rDPI)),1.04*max(max(fDPI),max(rDPI))])\r\n axStats[0,0].set_xlim([0.96*min(min(fDPV),min(rDPV)),1.04*max(max(fDPV),max(rDPV))])\r\n\r\n axStats[0,1].set_ylabel(\"$I_{sd}$ (A)\",fontsize=fontSize)\r\n axStats[0,1].set_ylim([0.96*min(min(fI0Vg),min(rI0Vg)),1.04*max(max(fI0Vg),max(rI0Vg))])\r\n axStats[0,1].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\r\n axStats[0,1].set_title('$I_{sd}$ @ $V_{bg}$=0',fontsize= \"small\",loc='right')\r\n\r\n axStats[2,0].set_ylabel(\"$I_{sd}$ (A)\",fontsize=fontSize)\r\n axStats[2,0].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\r\n axStats[2,0].set_ylim([0.96*min(min(fDPI),min(rDPI)),1.04*max(max(fDPI),max(rDPI))])\r\n axStats[2,0].set_title('Dirac Point Currents',fontsize= \"small\", loc='right')\r\n\r\n axStats[2,1].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\r\n axStats[2,1].set_ylabel(\"$I_{sd}$ (A)\",fontsize=fontSize)\r\n axStats[2,1].set_ylim([0.96*min(min(fDPMaxgradI),min(rDPMaxgradI)),1.04*max(max(fDPMaxgradI),max(rDPMaxgradI))])\r\n axStats[2,1].set_title('Max Trans. 
Currents',fontsize= \"small\", loc='right')\r\n\r\n elif mouse ==1:\r\n axStats[0,0].set_ylabel(\"$R_{sd}$ ($\\Omega$)\",fontsize=fontSize)\r\n axStats[0,0].ticklabel_format(style='plain', axis='y', scilimits=(0,0))\r\n axStats[0,0].set_ylim([0.96*min(Vds/max(fDPI),Vds/max(rDPI)),1.04*max(Vds/min(fDPI),Vds/min(rDPI))])\r\n axStats[0,0].set_xlim([0.96*min(min(fDPV),min(rDPV)),1.04*max(max(fDPV),max(rDPV))])\r\n\r\n axStats[0,1].set_ylabel(\"$R_{sd}$ ($\\Omega$)\",fontsize=fontSize)\r\n axStats[0,1].set_ylim([0.96*min(Vds/max(fI0Vg),Vds/max(rI0Vg)),1.04*max(Vds/min(fI0Vg),Vds/min(rI0Vg))])\r\n axStats[0,1].ticklabel_format(style='plain', axis='y', scilimits=(0,0))\r\n axStats[0,1].set_title('$R_{sd}$ @ $V_{bg}$=0',fontsize= \"small\",loc='right') \r\n \r\n axStats[2,0].set_ylabel(\"$R_{sd}$ ($\\Omega$)\",fontsize=fontSize)\r\n axStats[2,0].ticklabel_format(style='plain', axis='y', scilimits=(0,0))\r\n axStats[2,0].set_ylim([0.96*min(Vds/max(fDPI),Vds/max(rDPI)),1.04*max(Vds/min(fDPI),Vds/min(rDPI))])\r\n axStats[2,0].set_title('Dirac Point Resistances',fontsize= \"small\", loc='right')\r\n\r\n axStats[2,1].ticklabel_format(style='plain', axis='y', scilimits=(0,0))\r\n axStats[2,1].set_ylabel(\"$R_{sd}$ ($\\Omega$)\",fontsize=fontSize)\r\n axStats[2,1].set_ylim([0.96*min(Vds/max(fDPMaxgradI),Vds/max(rDPMaxgradI)),1.04*max(Vds/min(fDPMaxgradI),Vds/min(rDPMaxgradI))])\r\n axStats[2,1].set_title('Max Trans. Resistances',fontsize= \"small\", loc='right')\r\n \r\n # Finalise the axes parameters for the Parameter Details plot\r\n axStats[0,0].set_title('Dirac Points',fontsize= \"small\",loc='right')\r\n axStats[0,0].set_xlabel(\"$V_{bg}$ (V)\",fontsize=fontSize)\r\n axStats[0,0].tick_params(axis=\"both\", labelsize=labelSize)\r\n \r\n axStats[0,1].set_xticklabels(fourCharacterNames, rotation=45, fontsize=\"xx-small\")\r\n axStats[0,1].tick_params(axis=\"both\", labelsize=labelSize)\r\n\r\n axStats[1,0].set_title('Dirac Point Voltages',fontsize= \"small\", loc='right')\r\n axStats[1,0].set_yticklabels(fourCharacterNames, rotation=45, fontsize=\"xx-small\")\r\n axStats[1,0].set_xlabel(\"$V_{bg}$ (V)\",fontsize=fontSize)\r\n axStats[1,0].tick_params(axis=\"both\", labelsize=labelSize)\r\n \r\n axStats[1,1].set_title('Max Trans. 
Voltages',fontsize= \"small\", loc='right')\r\n axStats[1,1].set_xlabel(\"$V_{bg}$ (V)\",fontsize=fontSize)\r\n axStats[1,1].set_yticklabels(fourCharacterNames, rotation=45, fontsize=\"xx-small\")\r\n axStats[1,1].set_facecolor(\"#f5f5f5\")\r\n axStats[1,1].tick_params(axis=\"both\", labelsize=labelSize)\r\n\r\n axStats[2,0].set_xticklabels(fourCharacterNames, rotation=45, fontsize=\"xx-small\")\r\n axStats[2,0].tick_params(axis=\"both\", labelsize=labelSize)\r\n\r\n axStats[2,1].set_xticklabels(fourCharacterNames, rotation=45, fontsize=\"xx-small\")\r\n axStats[2,1].set_facecolor(\"#f5f5f5\")\r\n axStats[2,1].tick_params(axis=\"both\", labelsize=labelSize)\r\n\r\n # Reposition the plots with respect to their white space before passing to front GUI\r\n figStatsL.subplots_adjust(left = 0.10, right = 0.99,bottom = 0.16, top = 0.99, wspace = 0.28) \r\n figStats.subplots_adjust(top=0.95,bottom=0.06,left=0.09,right=0.94,hspace=0.57,wspace=0.28)\r\n if mouse == 1:\r\n fig.subplots_adjust(left=0.09, bottom=0.16, right=0.91, top=0.93)\r\n else:\r\n fig.subplots_adjust(left=0.09, bottom=0.16, right=0.99, top=0.93)\r\n\r\n return fig, figStats,figStatsL\r\n\r\ndef averageData(selectedList,userName):\r\n \r\n ## INITIALISE VARIABLES ##\r\n BODAverage=pd.DataFrame()\r\n lovelyOldToad=[]\r\n\r\n # Filter the dataframe with the entries in the selectedList\r\n global copied_original\r\n bigData=copied_original\r\n fdf=bigData.loc[:,selectedList]\r\n # Get column for back gate values\r\n vBG=fdf.iloc[:,0]\r\n # Get columns for current\r\n iDrains=fdf.xs(fdf.columns[1][1],axis=1,level=1,drop_level=False)\r\n averageIDrain=iDrains.mean(axis=1)\r\n # Concatenate the two columns together\r\n BODAverage=pd.concat([vBG,averageIDrain],axis=1)\r\n # Rename the columns so that it can be read by legacy data visualisation\r\n BODAverage.columns=[fdf.columns[0][1],fdf.columns[1][1]]\r\n # Add the multicolumn level so that it can be found in the list\r\n newName=selectedList[0][:selectedList[0].find(\"_\")+1]+userName+\"_\"+\"AVE\" #Append AVE so users know this has been edited\r\n BODAverage.columns=pd.MultiIndex.from_product([[newName],BODAverage.columns])\r\n # Concatenate to the copied_original database\r\n copied_original=pd.concat([copied_original,BODAverage],axis=1)\r\n # Now get a list of headers to display in the Datalist\r\n for x in copied_original.columns.get_level_values(0):\r\n if x not in lovelyOldToad:\r\n lovelyOldToad.append(x)\r\n \r\n return(lovelyOldToad)\r\n\r\ndef exportSelectedF(selectedList,Vds,L,W,oxideThick,oxideDielectric):\r\n \r\n ## INITIALISE VARIABLES ##\r\n BODExportSelect=pd.DataFrame()\r\n BODStats1=pd.DataFrame()\r\n BODMobility1=pd.DataFrame()\r\n \r\n if len (selectedList)>0:\r\n for selection in selectedList: \r\n \r\n # Section to export Data\r\n bigData=copied_original.copy()\r\n smallData1 = bigData.loc[:,(selection)]\r\n smallData1.columns=pd.MultiIndex.from_product([[selection],smallData1.columns]) \r\n BODExportSelect=pd.concat([BODExportSelect,smallData1],axis=1)\r\n\r\n # Section to export the Parameters\r\n newFrame1=statsTable(selection)\r\n rFrame=Vds/(newFrame1.iloc[[0,4,5,6,10,11],:]) # Add in the Parameters for Resistance space\r\n rFrame.index=['fDPR', 'fMaxgradR','fR0Vg', 'rDPR', 'rMaxgradR','rR0Vg']\r\n newFrame1=pd.concat([newFrame1,rFrame],axis=0)\r\n frameToAdd1=newFrame1\r\n BODStats1=pd.concat([BODStats1,frameToAdd1], axis=1)\r\n BODStats1.index.name=\"BOD_Params\"\r\n \r\n # Section to export the Mobilities\r\n 
_,_,_,newFrameMob1=mobility(selection,Vds,L,W,oxideThick,oxideDielectric)\r\n BODMobility1=pd.concat([BODMobility1,newFrameMob1], axis=1,ignore_index=False)\r\n\r\n return BODExportSelect,BODStats1,BODMobility1\r\n\r\ndef exportALLF():\r\n return copied_original\r\n\r\ndef defineInsides(x,y,selection,HorV): \r\n # This function determines the label and line colours for all plots in the Parameter Details\r\n \r\n if HorV == \"hE\":\r\n if x < y:\r\n insidesLines={\"y\":selection,\"xmin\":x,\"xmax\":y,\"color\":\"red\",\"zorder\":0}\r\n insidesText={\"s\":\"{:.0f}\".format(abs(x-y)),\"xy\":(((x+y)/2),selection),\"textcoords\":\"offset points\",\"xytext\":(0,5), \"color\":\"red\", \"fontsize\":\"xx-small\"}\r\n elif x > y:\r\n insidesLines={\"y\":selection,\"xmin\":y,\"xmax\":x,\"color\":\"black\",\"zorder\":0}\r\n insidesText={\"s\":\"{:.0f}\".format(abs(x-y)),\"xy\":(((x+y)/2),selection),\"textcoords\":\"offset points\",\"xytext\":(0,5), \"color\":\"black\", \"fontsize\":\"xx-small\"}\r\n else:\r\n insidesLines={\"y\":selection,\"xmin\":y,\"xmax\":x,\"color\":\"black\",\"zorder\":0, \"linestyle\":\"None\"}\r\n insidesText={\"xy\":(((x+y)/2),selection),\"s\":\"\"} \r\n\r\n if HorV[0] ==\"v\":\r\n if x < y:\r\n insidesLines={\"x\":selection,\"ymin\":x,\"ymax\":y,\"color\":\"red\",\"zorder\":0}\r\n if HorV[1] == \"E\":\r\n insidesText={\"s\":\"{:.1e}\".format(abs(x-y)),\"xy\":(selection,((x+y)/2)),\"textcoords\":\"offset points\",\"xytext\":(5,0), \"color\":\"red\", \"fontsize\":\"xx-small\"}\r\n elif HorV[1] == \"P\":\r\n insidesText={\"s\":\"{:.0f}\".format(abs(x-y)),\"xy\":(selection,((x+y)/2)),\"textcoords\":\"offset points\",\"xytext\":(5,0), \"color\":\"red\", \"fontsize\":\"xx-small\"}\r\n \r\n elif x > y:\r\n insidesLines={\"x\":selection,\"ymin\":y,\"ymax\":x,\"color\":\"black\",\"zorder\":0}\r\n if HorV[1] == \"E\":\r\n insidesText={\"s\":\"{:.1e}\".format(abs(x-y)),\"xy\":(selection,((x+y)/2)),\"textcoords\":\"offset points\",\"xytext\":(5,0), \"color\":\"black\", \"fontsize\":\"xx-small\"}\r\n if HorV[1] == \"P\":\r\n insidesText={\"s\":\"{:.0f}\".format(abs(x-y)),\"xy\":(selection,((x+y)/2)),\"textcoords\":\"offset points\",\"xytext\":(5,0), \"color\":\"black\", \"fontsize\":\"xx-small\"}\r\n \r\n else:\r\n insidesLines={\"x\":selection,\"ymin\":y,\"ymax\":x,\"color\":\"black\",\"zorder\":0, \"linestyle\":\"None\"}\r\n insidesText={\"xy\":(selection,((x+y)/2)),\"s\":\"\"} \r\n\r\n return insidesLines,insidesText\r\n\r\n#############################\r\n## TESTING CONDUCTED BELOW ##\r\n#############################\r\n\r\nif __name__==\"__main__\":\r\n # Testing is conducted down here...\r\n # print(\"Running test code inside the module\")\r\n pass # Uncomment this if no testing is required" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Series", "numpy.gradient", "matplotlib.pyplot.subplots", "pandas.DataFrame", "numpy.concatenate", "pandas.MultiIndex.from_product", "matplotlib.pyplot.close" ] ]
bruyle/oxidizable-pfas-precursor-inference
[ "b8a763c6eb0b3e49bca54ea7e85c692678822eef" ]
[ "infer_precursors.py" ]
[ "\"\"\" Generate samples of the posterior for PFAA precursors from TOP assay\nmeasurements in aqueous matrices.\n\nAuthors:\nColin Thackray ([email protected])\nBridger Ruyle ([email protected])\n\"\"\"\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom sampling import sample_measurement\nfrom functions import makeb\n\n# Command line arguments\nparser = argparse.ArgumentParser(\n description='Sample posterior for precursors.')\nparser.add_argument('ISTART', metavar='istart', type=int,\n help='first sample index (first index is 0)', default=0)\nparser.add_argument('IEND', metavar='iend', type=int, nargs='?',\n help='last sample index (first index is 0)', default=None)\nparser.add_argument('-d', '--datafile', dest='FILENAME', action='store',\n default='data/measurements.csv',\n help='location of measurements file')\nparser.add_argument('-o', '--outfile', dest='OUTFILE_STEM', action='store',\n default='infer_out/mcmcout_',\n help='Stem for output filename. Will gain suffix \\\n N (sample index)')\nparser.add_argument('-t', '--target-steps', dest='TARGET',\n action='store', default=2500, type=int,\n help='Effective sample size to attain')\nparser.add_argument('-m', '--max-steps', dest='MAX_STEPS',\n action='store', default=50000, type=int,\n help='Maximum number of steps before quiting.')\nparser.add_argument('-D', '--max-depth', dest='MAX_DEPTH',\n action='store', default=3, type=int,\n help='Maximum depth of windowing in sampler tuning.')\n\nargs = parser.parse_args()\nif args.IEND is None:\n args.IEND = args.ISTART\n\n# Load input data from disk\ndf = pd.read_csv(args.FILENAME)\nnames = df['Sample'].values\n\nmeasurements = df[['C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'PFOS']].values\ninmdlss = df[['C3MDL', 'C4MDL', 'C5MDL', 'C6MDL', 'C7MDL', 'C8MDL',\n 'PFOSMDL']].values\ninobserrs = df[['C3err', 'C4err', 'C5err', 'C6err', 'C7err', 'C8err',\n 'PFOSerr']].values\nnmeas = measurements.shape[0]\nmeasures = measurements\nmdlss = inmdlss\nerrs = inobserrs\n\n# Do sampling for requested measurements\nfor bi in range(args.ISTART, args.IEND+1):\n print('Calculating for sample ' + df['Sample'][bi], end='')\n\n PFOS = (measures[bi, 6], mdlss[bi, 6])\n\n # Not everyone measures C8 (PFNA), but if measured should be used.\n C8 = df['C8incl'][bi] # Boolean\n\n # Generate obs and MDL arrays\n mdls = makeb(mdlss[bi, :6], C8=C8)\n b = makeb(measures[bi, :6], C8=C8)\n berr = makeb(errs[bi, :6], C8=C8)\n\n prior_name = df['prior_name'][bi]\n print(f' with prior {prior_name}.')\n\n # Run MCMC ensemble to sample posterior\n sampler = sample_measurement(b,\n mdls,\n berr,\n PFOS,\n prior=prior_name,\n C8=C8,\n Nincrement=1000,\n TARGET_EFFECTIVE_STEPS=args.TARGET,\n MAX_STEPS=args.MAX_STEPS,\n MAX_DEPTH=args.MAX_DEPTH)\n\n # Save sampling output to disk\n trajectory = sampler.flatchain[:,:-1]\n outfile = f'{args.OUTFILE_STEM}{bi}'\n np.save(outfile, trajectory)\n" ]
[ [ "pandas.read_csv", "numpy.save" ] ]
florianthonig/listen-attend-and-spell
[ "218dd4f200cd564d3052c550dbbfe1f2cd836008" ]
[ "vctk/build_vocab.py" ]
[ "import sys\nimport numpy as np\n\n\ns = set()\n\nf = np.load(sys.argv[1]).item()\n\nfor line in f.values():\n s.update(line)\n\nd = sorted(list(s))\n\nwith open(sys.argv[2], 'w') as f:\n print('\\n'.join(d), file=f)\n" ]
[ [ "numpy.load" ] ]
dbbpjch/tf-quant-finance
[ "fa577701afe2f40b6cf0740336f44dc3c1b8222b" ]
[ "tf_quant_finance/experimental/pricing_platform/framework/market_data/volatility_surface.py" ]
[ "# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementation of VolatilitySurface object.\"\"\"\n\nfrom typing import Optional\n\nimport tensorflow.compat.v2 as tf\n\nfrom tf_quant_finance import datetime as dateslib\nfrom tf_quant_finance import math\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import daycount_conventions\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import implied_volatility_type\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import types\nfrom tf_quant_finance.experimental.pricing_platform.framework.market_data import utils\n\n_DayCountConventions = daycount_conventions.DayCountConventions\n_DayCountConventionsProtoType = types.DayCountConventionsProtoType\n\ninterpolation_2d = math.interpolation.interpolation_2d\n\n\nclass VolatilitySurface(pmd.VolatilitySurface):\n \"\"\"Represents a volatility surface.\"\"\"\n\n def __init__(\n self,\n valuation_date: types.DateTensor,\n expiries: types.DateTensor,\n strikes: types.FloatTensor,\n volatilities: types.FloatTensor,\n daycount_convention: Optional[_DayCountConventionsProtoType] = None,\n dtype: Optional[tf.DType] = None,\n name: Optional[str] = None):\n \"\"\"Initializes the volatility surface.\n\n Args:\n valuation_date: A `DateTensor` specifying the valuation (or\n settlement) date for the curve.\n expiries: A `DateTensor` containing the expiry dates on which the\n implied volatilities are specified. Should have a compatible shape with\n valuation_date.\n strikes: A `Tensor` of real dtype specifying the strikes corresponding to\n the input maturities. The shape of this input should match the shape of\n `expiries`.\n volatilities: A `Tensor` of real dtype specifying the volatilities\n corresponding to the input maturities. The shape of this input should\n match the shape of `expiries`.\n daycount_convention: `DayCountConventions` to use for the interpolation\n purpose.\n Default value: `None` which maps to actual/365 day count convention.\n dtype: `tf.Dtype`. Optional input specifying the dtype of the `rates`\n input.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'rate_curve'.\n \"\"\"\n self._name = name or \"VolatilitySurface\"\n self._dtype = dtype or tf.float64\n with tf.name_scope(self._name):\n self._daycount_convention = (\n daycount_convention or _DayCountConventions.ACTUAL_365)\n self._day_count_fn = utils.get_daycount_fn(self._daycount_convention)\n self._valuation_date = dateslib.convert_to_date_tensor(\n valuation_date)\n self._expiries = dateslib.convert_to_date_tensor(\n expiries)\n self._strikes = tf.convert_to_tensor(\n strikes, dtype=self._dtype, name=\"strikes\")\n self._volatilities = tf.convert_to_tensor(\n volatilities, dtype=self._dtype, name=\"volatilities\")\n expiry_times = self._day_count_fn(\n start_date=self._valuation_date,\n end_date=self._expiries,\n dtype=self._dtype)\n self._interpolator = interpolation_2d.Interpolation2D(\n expiry_times, strikes, volatilities, dtype=self._dtype)\n\n def volatility(self,\n expiry: types.DateTensor,\n strike: types.FloatTensor,\n term: Optional[types.Period] = None) -> types.FloatTensor:\n \"\"\"Returns the interpolated volatility on a specified set of expiries.\n\n Args:\n expiry: The expiry dates for which the interpolation is desired.\n strike: The strikes for which the interpolation is desired.\n term: Optional input specifiying the term of the underlying rate for\n which the interpolation is desired. Relevant for interest rate implied\n volatility data.\n\n Returns:\n A `Tensor` of the same shape as `expiry` with the interpolated volatility\n from the volatility surface.\n \"\"\"\n del term\n expiry = dateslib.convert_to_date_tensor(expiry)\n expiries = self._day_count_fn(\n start_date=self._valuation_date,\n end_date=expiry,\n dtype=self._dtype)\n strike = tf.convert_to_tensor(strike, dtype=self._dtype, name=\"strike\")\n return self._interpolator.interpolate(expiries, strike)\n\n def volatility_type(self) -> implied_volatility_type.ImpliedVolatilityType:\n \"\"\"Returns the type of implied volatility.\"\"\"\n pass\n\n def node_expiries(self) -> types.DateTensor:\n \"\"\"Expiry dates at which the implied volatilities are specified.\"\"\"\n return self._expiries\n\n def node_strikes(self) -> tf.Tensor:\n \"\"\"Striks at which the implied volatilities are specified.\"\"\"\n return self._strikes\n\n def node_terms(self) -> types.Period:\n \"\"\"Rate terms corresponding to the specified implied volatilities.\"\"\"\n pass\n\n__all__ = [\"VolatilitySurface\"]\n" ]
[ [ "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.name_scope" ] ]
rodrigoarenas456/tspme
[ "7d429b482851a6a5bfda293c65b7e4e4df8cca2b" ]
[ "tspme/metaheuristics.py" ]
[ "import numpy as np\nimport random\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom tspme.utils.customer_properties import LazyProperty\n\n\nclass SimulatedAnnealing:\n\n def __init__(self, init_temp_factor=1, alpha=0.995, stop_temp=0.5, n_cycles=50):\n self.init_temp = init_temp_factor\n self.alpha = alpha\n self.stop_temp = stop_temp\n self.n_cycles = n_cycles\n self.steps = None\n self.cost_matrix = None\n self.locations_generator = None\n self.solutions_size = None\n self.cost_hist = None\n self.solution = None\n self.callback = None\n\n def set_distance_matrix(self, locations_generator):\n self.locations_generator = locations_generator\n self.cost_matrix = euclidean_distances(self.locations_generator[\"locations\"])\n return self.cost_matrix\n\n @LazyProperty\n def size(self):\n return len(self.locations_generator[\"x\"])\n\n def tour_len(self, tour):\n return np.sum(self.cost_matrix[np.roll(tour, 1), tour])\n\n def opt2(self, route):\n new_route = route.copy()\n crossover_points = np.random.choice(range(self.size), size=2, replace=False)\n _init_pos, _end_pos = min(crossover_points), max(crossover_points)\n new_route = [new_route[:_init_pos], new_route[_init_pos:_end_pos][::-1], new_route[_end_pos:]]\n new_route = np.hstack(new_route).astype(int)\n new_route[self.size] = new_route[0]\n return new_route\n\n #TODO: Change random.sample for np.random.choice\n def fit(self, return_cost_hist=False):\n current_solution = random.sample(range(self.size), self.size)\n current_solution.append(current_solution[0])\n current_len = self.tour_len(current_solution)\n t = 0.05 * self.tour_len(current_solution)\n self.n_cycles * int(np.log(self.init_temp / t) / np.log(self.alpha) + 1)\n self.cost_hist = [self.tour_len(current_solution)]\n\n k = 0\n while t > self.stop_temp:\n for i in enumerate(range(self.n_cycles)):\n acceptance_threshold = random.random()\n candidate = self.opt2(current_solution)\n candidate_len = self.tour_len(candidate)\n if candidate_len < current_len or acceptance_threshold < np.exp(-(candidate_len - current_len) / t):\n current_solution = candidate\n current_len = candidate_len\n k += 1\n if return_cost_hist:\n self.cost_hist.append(current_len)\n t *= self.alpha\n self.solution = current_solution\n self.callback = {\"route\": self.solution,\n \"cost\": current_len}\n if return_cost_hist:\n self.callback[\"cost_hist\"] = self.cost_hist\n\n return self.callback\n" ]
[ [ "numpy.hstack", "numpy.log", "sklearn.metrics.pairwise.euclidean_distances", "numpy.exp", "numpy.roll" ] ]
tijsmaas/TrafficPrediction
[ "9129faea8fee8c2d90595d2974f0b11030ad2674", "9129faea8fee8c2d90595d2974f0b11030ad2674" ]
[ "scripts/eval_baseline_methods.py", "model/pytorch/engine.py" ]
[ "import argparse\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom statsmodels.tsa.vector_ar.var_model import VAR\r\n\r\nfrom lib.logger import get_logger\r\nfrom lib.metrics.metrics_np import masked_rmse_np, masked_mape_np, masked_mae_np\r\nfrom lib.utils import StandardScaler\r\n\r\n\r\n\r\n\r\ndef historical_average_predict(df, period=12 * 24 * 7, test_ratio=0.2, null_val=0.):\r\n \"\"\"\r\n Calculates the historical average of sensor reading.\r\n :param df:\r\n :param period: default 1 week. (12 measurements per hour, 24 hours, 7 days)\r\n :param test_ratio:\r\n :param null_val: default 0.\r\n :return:\r\n \"\"\"\r\n n_sample, n_sensor = df.shape\r\n n_test = int(round(n_sample * test_ratio))\r\n n_train = n_sample - n_test\r\n y_test = df[-n_test:]\r\n y_predict = pd.DataFrame.copy(y_test)\r\n\r\n for i in range(n_train, min(n_sample, n_train + period)):\r\n inds = [j for j in range(i % period, n_train, period)]\r\n historical = df.iloc[inds, :]\r\n y_predict.iloc[i - n_train, :] = historical[historical != null_val].mean()\r\n # Copy each period.\r\n for i in range(n_train + period, n_sample, period):\r\n size = min(period, n_sample - i)\r\n start = i - n_train\r\n y_predict.iloc[start:start + size, :] = y_predict.iloc[start - period: start + size - period, :].values\r\n return y_predict, y_test\r\n\r\n\r\ndef static_predict(df, n_forward, test_ratio=0.2):\r\n \"\"\"\r\n Assumes $x^{t+1} = x^{t}$\r\n :param df:\r\n :param n_forward:\r\n :param test_ratio:\r\n :return:\r\n \"\"\"\r\n test_num = int(round(df.shape[0] * test_ratio))\r\n y_test = df[-test_num:]\r\n y_predict = df.shift(n_forward).iloc[-test_num:]\r\n return y_predict, y_test\r\n\r\n\r\ndef var_predict(df, n_forwards=(1, 3), n_lags=4, test_ratio=0.2):\r\n \"\"\"\r\n Multivariate time series forecasting using Vector Auto-Regressive Model.\r\n :param df: pandas.DataFrame, index: time, columns: sensor id, content: data.\r\n :param n_forwards: a tuple of horizons.\r\n :param n_lags: the order of the VAR model.\r\n :param test_ratio:\r\n :return: [list of prediction in different horizon], dt_test\r\n \"\"\"\r\n n_sample, n_output = df.shape\r\n n_test = int(round(n_sample * test_ratio))\r\n n_train = n_sample - n_test\r\n df_train, df_test = df[:n_train], df[n_train:]\r\n\r\n scaler = StandardScaler(mean=df_train.values.mean(), std=df_train.values.std())\r\n data = scaler.transform(df_train.values)\r\n var_model = VAR(data)\r\n var_result = var_model.fit(n_lags)\r\n max_n_forwards = np.max(n_forwards)\r\n # Do forecasting.\r\n result = np.zeros(shape=(len(n_forwards), n_test, n_output))\r\n start = n_train - n_lags - max_n_forwards + 1\r\n for input_ind in range(start, n_sample - n_lags):\r\n prediction = var_result.forecast(scaler.transform(df.values[input_ind: input_ind + n_lags]), max_n_forwards)\r\n for i, n_forward in enumerate(n_forwards):\r\n result_ind = input_ind - n_train + n_lags + n_forward - 1\r\n if 0 <= result_ind < n_test:\r\n result[i, result_ind, :] = prediction[n_forward - 1, :]\r\n\r\n df_predicts = []\r\n for i, n_forward in enumerate(n_forwards):\r\n df_predict = pd.DataFrame(scaler.inverse_transform(result[i]), index=df_test.index, columns=df_test.columns)\r\n df_predicts.append(df_predict)\r\n return df_predicts, df_test\r\n\r\n\r\ndef eval_static(traffic_reading_df):\r\n logger.info('Static')\r\n horizons = [1, 3, 6, 12]\r\n logger.info('\\t'.join(['Model', 'Horizon', 'RMSE', 'MAPE', 'MAE']))\r\n for horizon in horizons:\r\n y_predict, y_test = static_predict(traffic_reading_df, 
n_forward=horizon, test_ratio=0.2)\r\n rmse = masked_rmse_np(preds=y_predict.as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n mape = masked_mape_np(preds=y_predict.as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n mae = masked_mae_np(preds=y_predict.as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n line = 'Static\\t%d\\t%.2f\\t%.2f\\t%.2f' % (horizon, rmse, mape * 100, mae)\r\n logger.info(line)\r\n\r\n\r\ndef eval_historical_average(traffic_reading_df, period):\r\n y_predict, y_test = historical_average_predict(traffic_reading_df, period=period, test_ratio=0.2)\r\n rmse = masked_rmse_np(preds=y_predict.as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n mape = masked_mape_np(preds=y_predict.as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n mae = masked_mae_np(preds=y_predict.as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n logger.info('Historical Average')\r\n logger.info('\\t'.join(['Model', 'Horizon', 'RMSE', 'MAPE', 'MAE']))\r\n for horizon in [1, 3, 6, 12]:\r\n line = 'HA\\t%d\\t%.2f\\t%.2f\\t%.2f' % (horizon, rmse, mape * 100, mae)\r\n logger.info(line)\r\n\r\n\r\ndef eval_var(traffic_reading_df, n_lags=3):\r\n n_forwards = [1, 3, 6, 12]\r\n y_predicts, y_test = var_predict(traffic_reading_df, n_forwards=n_forwards, n_lags=n_lags,\r\n test_ratio=0.2)\r\n logger.info('VAR (lag=%d)' % n_lags)\r\n logger.info('Model\\tHorizon\\tRMSE\\tMAPE\\tMAE')\r\n for i, horizon in enumerate(n_forwards):\r\n rmse = masked_rmse_np(preds=y_predicts[i].as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n mape = masked_mape_np(preds=y_predicts[i].as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n mae = masked_mae_np(preds=y_predicts[i].as_matrix(), labels=y_test.as_matrix(), null_val=0)\r\n line = 'VAR\\t%d\\t%.2f\\t%.2f\\t%.2f' % (horizon, rmse, mape * 100, mae)\r\n logger.info(line)\r\n\r\n\r\ndef main(args):\r\n traffic_reading_df = pd.read_hdf(args.traffic_reading_filename)\r\n # eval_static(traffic_reading_df)\r\n eval_historical_average(traffic_reading_df, period=7 * 24 * 12)\r\n # eval_var(traffic_reading_df, n_lags=3)\r\n\r\n\r\nif __name__ == '__main__':\r\n # Ex with python -m scripts.eval_baseline_methods\r\n logger = get_logger('data/model', 'Baseline')\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--traffic_reading_filename', default=\"data/metr-la.h5\", type=str,\r\n help='Path to the traffic Dataframe.')\r\n args = parser.parse_args()\r\n main(args)\r\n", "import torch\nimport torch.optim as optim\nfrom torch import nn\nfrom tqdm import tqdm\n\nfrom lib.metrics import metrics_torch\nfrom model.pytorch.gwnet_model import gwnet\n\nclass Evaluator():\n def __init__(self, scaler, device, model):\n self.model = model\n self.model.to(device)\n self.scaler = scaler\n self.device = device\n\n def compute_preds(self, loader):\n self.model.eval()\n outputs = []\n y_vals = []\n for iter, (x, y) in tqdm(enumerate(loader.get_iterator())):\n testx = torch.Tensor(x).to(self.device)\n testy = torch.Tensor(y[..., 0]).to(self.device)\n testx = testx.transpose(1, 3)\n with torch.no_grad():\n # [64, 12, 1, 207]\n output = self.model(testx)\n preds = output.transpose(1, 3)\n outputs.append(preds.squeeze())\n y_vals.append(testy.transpose(1, 2))\n yhat = torch.cat(outputs, dim=0)\n realy = torch.cat(y_vals, dim=0)\n return yhat.cpu().numpy(), realy.cpu().numpy()\n\n def eval(self, input, real_val):\n self.model.eval()\n input = nn.functional.pad(input,(1,0,0,0))\n with torch.no_grad():\n output = self.model(input)\n predict, real = 
self._postprocess(output, real_val)\n loss, rmse, mape = metrics_torch.calculate_metrics_torch(predict, real, 0.0)\n return loss.item(), rmse.item(), mape.item()\n\n # model -> evaluation\n # output = [batch_size,12,num_nodes,1]\n def _postprocess(self, output, real_val):\n output = output.transpose(1,3)\n predict = self.scaler.inverse_transform(output)\n real = torch.unsqueeze(real_val, dim=1)\n return predict, real\n\n\nclass Trainer(Evaluator):\n def __init__(self, scaler, device, model, lrate, wdecay):\n super().__init__(scaler, device, model)\n self.optimizer = optim.Adam(self.model.parameters(), lr=lrate, weight_decay=wdecay)\n self.loss = metrics_torch.masked_mae_torch\n self.clip = 5\n\n def train(self, input, real_val):\n self.model.train()\n self.optimizer.zero_grad()\n input = nn.functional.pad(input,(1,0,0,0))\n output = self.model(input)\n predict, real = self._postprocess(output, real_val)\n loss, rmse, mape = metrics_torch.calculate_metrics_torch(predict, real, 0.0)\n loss.backward()\n if self.clip is not None:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n self.optimizer.step()\n return loss.item(), rmse.item(), mape.item()\n\n" ]
[ [ "numpy.max", "pandas.read_hdf", "pandas.DataFrame.copy" ], [ "torch.Tensor", "torch.cat", "torch.unsqueeze", "torch.no_grad", "torch.nn.functional.pad" ] ]
nightwnvol/AMICO
[ "88638dbbe05a89bbf96354ccca79622828bd3c75" ]
[ "amico/preproc.py" ]
[ "from __future__ import print_function\n\nimport numpy as np\nfrom scipy.optimize import minimize\nimport scipy.special\nfrom tqdm import tqdm\nfrom amico.util import get_verbose\n\n# Kaden's functionals\ndef F_norm_Diff_K(E0,Signal,sigma_diff):\n # ------- SMT functional\n sig2 = sigma_diff**2.0\n F_norm = np.sum( ( Signal - np.sqrt( (np.pi*sig2)/2.0) * scipy.special.eval_laguerre(1.0/2.0, -1.0 * (E0**2.0) / (2.0*sig2), out=None) )**2.0 )\n return np.array(F_norm)\n\ndef der_Diff(E0,Signal,sigma_diff):\n E0 = np.array(E0)\n sig2 = sigma_diff**2.0\n k1 = np.sqrt((np.pi*sig2)/2.0)\n ET = -1.0*(E0**2.0)/(2.0*sig2)\n der1 = 2.0 * ( Signal - k1 * scipy.special.eval_laguerre(0.5, ET) )\n der2 = k1 * scipy.special.hyp1f1( 0.5, 2.0, ET ) * (-0.5/(2.0*sig2)) * E0\n return der1 * der2\n\ndef debiasRician(DWI,SNR,mask,scheme):\n debiased_DWI = np.zeros(DWI.shape)\n idx = 0\n with tqdm(total=mask.sum(), ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:\n for ix in range(DWI.shape[0]):\n for iy in range(DWI.shape[1]):\n for iz in range(DWI.shape[2]):\n if mask[ix,iy,iz]:\n b0 = DWI[ix,iy,iz,scheme.b0_idx].mean()\n sigma_diff = b0/SNR\n init_guess = DWI[ix,iy,iz,:].copy()\n tmp = minimize(F_norm_Diff_K, init_guess, args=(init_guess,sigma_diff), method = 'L-BFGS-B', jac=der_Diff)\n debiased_DWI[ix,iy,iz] = tmp.x\n progress.update()\n return debiased_DWI\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.sqrt", "scipy.optimize.minimize" ] ]
aseber/OpenCV
[ "9b5deef24acdc3664e7989e78d8935bbd140a880" ]
[ "BlurredMask.py" ]
[ "import cv2\nimport numpy\n\ndef main():\n ##Use default camera\n camera = cv2.VideoCapture(-1)\n\n # cv2.namedwindow('Original')\n while camera.isOpened():\n _, image = camera.read()\n cv2.imshow('Original', image)\n #print image\n cv2.waitKey(5)\n\n hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n lower_yellow = numpy.array([25,0,120])\n upper_yellow = numpy.array([47,251,255])\n\n mask = cv2.inRange(hsvImage, lower_yellow, upper_yellow)\n\n blurredMask = cv2.bilateralFilter(mask, 9, 75, 75)\n\n cv2.imshow('blurredMask', blurredMask)\n\n contimg, contours, hierarchy = cv2.findContours(blurredMask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n contourimg = cv2.drawContours(contimg, contours, 20, (0,255,0), 3)\n\n cv2.imshow('Contimg', contourimg)\n\n cv2.imshow('HSV Image', mask)\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.array" ] ]
dubosese/mbuild
[ "5823c22ded6d31dc00bb3e8e30583720414ddfe0" ]
[ "mbuild/compound.py" ]
[ "from __future__ import print_function, division\n\n__all__ = ['load', 'clone', 'Compound', 'Particle']\n\nimport collections\nfrom collections import OrderedDict, defaultdict\nfrom copy import deepcopy\nimport itertools\nimport os\nimport sys\nimport tempfile\nfrom warnings import warn\n\nimport mdtraj as md\nfrom mdtraj.core.element import get_by_symbol\nimport numpy as np\nfrom oset import oset as OrderedSet\nimport parmed as pmd\nfrom parmed.periodic_table import AtomicNum, element_by_name, Mass\nfrom six import integer_types, string_types\n\nfrom mbuild.bond_graph import BondGraph\nfrom mbuild.box import Box\nfrom mbuild.exceptions import MBuildError\nfrom mbuild.utils.decorators import deprecated\nfrom mbuild.formats.xyz import read_xyz\nfrom mbuild.formats.hoomdxml import write_hoomdxml\nfrom mbuild.formats.lammpsdata import write_lammpsdata\nfrom mbuild.formats.gsdwriter import write_gsd\nfrom mbuild.periodic_kdtree import PeriodicCKDTree\nfrom mbuild.utils.io import run_from_ipython, import_\nfrom mbuild.coordinate_transform import _translate, _rotate\n\n\ndef load(filename, relative_to_module=None, compound=None, coords_only=False,\n rigid=False, use_parmed=False, smiles=False, **kwargs):\n \"\"\"Load a file into an mbuild compound.\n\n Files are read using the MDTraj package unless the `use_parmed` argument is\n specified as True. Please refer to http://mdtraj.org/1.8.0/load_functions.html\n for formats supported by MDTraj and https://parmed.github.io/ParmEd/html/\n readwrite.html for formats supported by ParmEd.\n\n Parameters\n ----------\n filename : str\n Name of the file from which to load atom and bond information.\n relative_to_module : str, optional, default=None\n Instead of looking in the current working directory, look for the file\n where this module is defined. This is typically used in Compound\n classes that will be instantiated from a different directory\n (such as the Compounds located in mbuild.lib).\n compound : mb.Compound, optional, default=None\n Existing compound to load atom and bond information into.\n coords_only : bool, optional, default=False\n Only load the coordinates into an existing compoint.\n rigid : bool, optional, default=False\n Treat the compound as a rigid body\n use_parmed : bool, optional, default=False\n Use readers from ParmEd instead of MDTraj.\n smiles: bool, optional, default=False\n Use Open Babel to parse filename as a SMILES string\n or file containing a SMILES string\n **kwargs : keyword arguments\n Key word arguments passed to mdTraj for loading.\n\n Returns\n -------\n compound : mb.Compound\n\n \"\"\"\n # Handle mbuild *.py files containing a class that wraps a structure file\n # in its own folder. 
E.g., you build a system from ~/foo.py and it imports\n # from ~/bar/baz.py where baz.py loads ~/bar/baz.pdb.\n if relative_to_module:\n script_path = os.path.realpath(\n sys.modules[relative_to_module].__file__)\n file_dir = os.path.dirname(script_path)\n filename = os.path.join(file_dir, filename)\n\n if compound is None:\n compound = Compound()\n\n # Handle the case of a xyz file, which must use an internal reader\n extension = os.path.splitext(filename)[-1]\n if extension == '.xyz' and not 'top' in kwargs:\n if coords_only:\n tmp = read_xyz(filename)\n if tmp.n_particles != compound.n_particles:\n raise ValueError('Number of atoms in {filename} does not match'\n ' {compound}'.format(**locals()))\n ref_and_compound = zip(tmp._particles(include_ports=False),\n compound.particles(include_ports=False))\n for ref_particle, particle in ref_and_compound:\n particle.pos = ref_particle.pos\n else:\n compound = read_xyz(filename)\n return compound\n\n if use_parmed:\n warn(\n \"use_parmed set to True. Bonds may be inferred from inter-particle \"\n \"distances and standard residue templates!\")\n structure = pmd.load_file(filename, structure=True, **kwargs)\n compound.from_parmed(structure, coords_only=coords_only)\n\n elif smiles:\n pybel = import_('pybel')\n # First we try treating filename as a SMILES string\n try:\n mymol = pybel.readstring(\"smi\", filename)\n # Now we treat it as a filename\n except(OSError, IOError):\n # For now, we only support reading in a single smiles molecule,\n # but pybel returns a generator, so we get the first molecule\n # and warn the user if there is more\n\n mymol_generator = pybel.readfile(\"smi\", filename)\n mymol_list = list(mymol_generator)\n if len(mymol_list) == 1:\n mymol = mymol_list[0]\n else:\n mymol = mymol_list[0]\n warn(\"More than one SMILES string in file, more than one SMILES \"\n \"string is not supported, using {}\".format(mymol.write(\"smi\")))\n\n tmp_dir = tempfile.mkdtemp()\n temp_file = os.path.join(tmp_dir, 'smiles_to_mol2_intermediate.mol2')\n mymol.make3D()\n mymol.write(\"MOL2\", temp_file)\n structure = pmd.load_file(temp_file, structure=True, **kwargs)\n compound.from_parmed(structure, coords_only=coords_only)\n\n else:\n traj = md.load(filename, **kwargs)\n compound.from_trajectory(traj, frame=-1, coords_only=coords_only)\n\n if rigid:\n compound.label_rigid_bodies()\n return compound\n\n\ndef clone(existing_compound, clone_of=None, root_container=None):\n \"\"\"A faster alternative to deepcopying.\n\n Does not resolve circular dependencies. This should be safe provided\n you never try to add the top of a Compound hierarchy to a\n sub-Compound.\n\n Parameters\n ----------\n existing_compound : mb.Compound\n Existing Compound that will be copied\n\n Other Parameters\n ----------------\n clone_of : dict, optional\n root_container : mb.Compound, optional\n\n \"\"\"\n if clone_of is None:\n clone_of = dict()\n\n newone = existing_compound._clone(clone_of=clone_of,\n root_container=root_container)\n existing_compound._clone_bonds(clone_of=clone_of)\n return newone\n\n\nclass Compound(object):\n \"\"\"A building block in the mBuild hierarchy.\n\n Compound is the superclass of all composite building blocks in the mBuild\n hierarchy. That is, all composite building blocks must inherit from\n compound, either directly or indirectly. The design of Compound follows the\n Composite design pattern (Gamma, Erich; Richard Helm; Ralph Johnson; John\n M. Vlissides (1995). Design Patterns: Elements of Reusable Object-Oriented\n Software. 
Addison-Wesley. p. 395. ISBN 0-201-63361-2.), with Compound being\n the composite, and Particle playing the role of the primitive (leaf) part,\n where Particle is in fact simply an alias to the Compound class.\n\n Compound maintains a list of children (other Compounds contained within),\n and provides a means to tag the children with labels, so that the compounds\n can be easily looked up later. Labels may also point to objects outside the\n Compound's containment hierarchy. Compound has built-in support for copying\n and deepcopying Compound hierarchies, enumerating particles or bonds in the\n hierarchy, proximity based searches, visualization, I/O operations, and a\n number of other convenience methods.\n\n Parameters\n ----------\n subcompounds : mb.Compound or list of mb.Compound, optional, default=None\n One or more compounds to be added to self.\n name : str, optional, default=self.__class__.__name__\n The type of Compound.\n pos : np.ndarray, shape=(3,), dtype=float, optional, default=[0, 0, 0]\n The position of the Compound in Cartestian space\n charge : float, optional, default=0.0\n Currently not used. Likely removed in next release.\n periodicity : np.ndarray, shape=(3,), dtype=float, optional, default=[0, 0, 0]\n The periodic lengths of the Compound in the x, y and z directions.\n Defaults to zeros which is treated as non-periodic.\n port_particle : bool, optional, default=False\n Whether or not this Compound is part of a Port\n\n Attributes\n ----------\n bond_graph : mb.BondGraph\n Graph-like object that stores bond information for this Compound\n children : OrderedSet\n Contains all children (other Compounds).\n labels : OrderedDict\n Labels to Compound/Atom mappings. These do not necessarily need not be\n in self.children.\n parent : mb.Compound\n The parent Compound that contains this part. Can be None if this\n compound is the root of the containment hierarchy.\n referrers : set\n Other compounds that reference this part with labels.\n rigid_id : int, default=None\n The ID of the rigid body that this Compound belongs to. Only Particles\n (the bottom of the containment hierarchy) can have integer values for\n `rigid_id`. Compounds containing rigid particles will always have\n `rigid_id == None`. See also `contains_rigid`.\n boundingbox\n center\n contains_rigid\n max_rigid_id\n n_particles\n n_bonds\n root\n xyz\n xyz_with_ports\n\n \"\"\"\n\n def __init__(self, subcompounds=None, name=None, pos=None, charge=0.0,\n periodicity=None, port_particle=False):\n super(Compound, self).__init__()\n\n if name:\n if not isinstance(name, string_types):\n raise ValueError(\n 'Compound.name should be a string. 
You passed '\n '{}'.format(name))\n self.name = name\n else:\n self.name = self.__class__.__name__\n\n # A periodicity of zero in any direction is treated as non-periodic.\n if periodicity is None:\n self._periodicity = np.array([0.0, 0.0, 0.0])\n else:\n self._periodicity = np.asarray(periodicity)\n\n if pos is not None:\n self._pos = np.asarray(pos, dtype=float)\n else:\n self._pos = np.zeros(3)\n\n self.parent = None\n self.children = OrderedSet()\n self.labels = OrderedDict()\n self.referrers = set()\n\n self.bond_graph = None\n self.port_particle = port_particle\n\n self._rigid_id = None\n self._contains_rigid = False\n self._check_if_contains_rigid_bodies = False\n\n # self.add() must be called after labels and children are initialized.\n if subcompounds:\n if charge:\n raise MBuildError(\n 'Cannot set the charge of a Compound containing '\n 'subcompounds.')\n self.add(subcompounds)\n self._charge = 0.0\n else:\n self._charge = charge\n\n def particles(self, include_ports=False):\n \"\"\"Return all Particles of the Compound.\n\n Parameters\n ----------\n include_ports : bool, optional, default=False\n Include port particles\n\n Yields\n -------\n mb.Compound\n The next Particle in the Compound\n\n \"\"\"\n if not self.children:\n yield self\n else:\n for particle in self._particles(include_ports):\n yield particle\n\n def _particles(self, include_ports=False):\n \"\"\"Return all Particles of the Compound. \"\"\"\n for child in self.successors():\n if not child.children:\n if include_ports or not child.port_particle:\n yield child\n\n def successors(self):\n \"\"\"Yield Compounds below self in the hierarchy.\n\n Yields\n -------\n mb.Compound\n The next Particle below self in the hierarchy\n\n \"\"\"\n if not self.children:\n return\n for part in self.children:\n # Parts local to the current Compound.\n yield part\n # Parts further down the hierarchy.\n for subpart in part.successors():\n yield subpart\n\n @property\n def n_particles(self):\n \"\"\"Return the number of Particles in the Compound.\n\n Returns\n -------\n int\n The number of Particles in the Compound\n\n \"\"\"\n if not self.children:\n return 1\n else:\n return self._n_particles(include_ports=False)\n\n def _n_particles(self, include_ports=False):\n \"\"\"Return the number of Particles in the Compound. 
\"\"\"\n return sum(1 for _ in self._particles(include_ports))\n\n def _contains_only_ports(self):\n for part in self.children:\n if not part.port_particle:\n return False\n return True\n\n def ancestors(self):\n \"\"\"Generate all ancestors of the Compound recursively.\n\n Yields\n ------\n mb.Compound\n The next Compound above self in the hierarchy\n\n \"\"\"\n if self.parent is not None:\n yield self.parent\n for ancestor in self.parent.ancestors():\n yield ancestor\n\n @property\n def root(self):\n \"\"\"The Compound at the top of self's hierarchy.\n\n Returns\n -------\n mb.Compound\n The Compound at the top of self's hierarchy\n\n \"\"\"\n parent = None\n for parent in self.ancestors():\n pass\n if parent is None:\n return self\n return parent\n\n def particles_by_name(self, name):\n \"\"\"Return all Particles of the Compound with a specific name\n\n Parameters\n ----------\n name : str\n Only particles with this name are returned\n\n Yields\n ------\n mb.Compound\n The next Particle in the Compound with the user-specified name\n\n \"\"\"\n for particle in self.particles():\n if particle.name == name:\n yield particle\n\n @property\n def charge(self):\n return sum([particle._charge for particle in self.particles()])\n\n @charge.setter\n def charge(self, value):\n if self._contains_only_ports():\n self._charge = value\n else:\n raise AttributeError(\n \"charge is immutable for Compounds that are \"\n \"not at the bottom of the containment hierarchy.\")\n\n @property\n def rigid_id(self):\n return self._rigid_id\n\n @rigid_id.setter\n def rigid_id(self, value):\n if self._contains_only_ports():\n self._rigid_id = value\n for ancestor in self.ancestors():\n ancestor._check_if_contains_rigid_bodies = True\n else:\n raise AttributeError(\n \"rigid_id is immutable for Compounds that are \"\n \"not at the bottom of the containment hierarchy.\")\n\n @property\n def contains_rigid(self):\n \"\"\"Returns True if the Compound contains rigid bodies\n\n If the Compound contains any particle with a rigid_id != None\n then contains_rigid will return True. If the Compound has no\n children (i.e. the Compound resides at the bottom of the containment\n hierarchy) then contains_rigid will return False.\n\n Returns\n -------\n bool\n True if the Compound contains any particle with a rigid_id != None\n\n Notes\n -----\n The private variable '_check_if_contains_rigid_bodies' is used to help\n cache the status of 'contains_rigid'. If '_check_if_contains_rigid_bodies'\n is False, then the rigid body containment of the Compound has not changed,\n and the particle tree is not traversed, boosting performance.\n\n \"\"\"\n if self._check_if_contains_rigid_bodies:\n self._check_if_contains_rigid_bodies = False\n if any(particle.rigid_id is not None for particle in self._particles()):\n self._contains_rigid = True\n else:\n self._contains_rigid = False\n return self._contains_rigid\n\n @property\n def max_rigid_id(self):\n \"\"\"Returns the maximum rigid body ID contained in the Compound.\n\n This is usually used by compound.root to determine the maximum\n rigid_id in the containment hierarchy.\n\n Returns\n -------\n int or None\n The maximum rigid body ID contained in the Compound. 
If no\n rigid body IDs are found, None is returned\n\n \"\"\"\n try:\n return max([particle.rigid_id for particle in self.particles()\n if particle.rigid_id is not None])\n except ValueError:\n return\n\n def rigid_particles(self, rigid_id=None):\n \"\"\"Generate all particles in rigid bodies.\n\n If a rigid_id is specified, then this function will only yield particles\n with a matching rigid_id.\n\n Parameters\n ----------\n rigid_id : int, optional\n Include only particles with this rigid body ID\n\n Yields\n ------\n mb.Compound\n The next particle with a rigid_id that is not None, or the next\n particle with a matching rigid_id if specified\n\n \"\"\"\n for particle in self.particles():\n if rigid_id is not None:\n if particle.rigid_id == rigid_id:\n yield particle\n else:\n if particle.rigid_id is not None:\n yield particle\n\n def label_rigid_bodies(self, discrete_bodies=None, rigid_particles=None):\n \"\"\"Designate which Compounds should be treated as rigid bodies\n\n If no arguments are provided, this function will treat the compound\n as a single rigid body by providing all particles in `self` with the\n same rigid_id. If `discrete_bodies` is not None, each instance of\n a Compound with a name found in `discrete_bodies` will be treated as a\n unique rigid body. If `rigid_particles` is not None, only Particles\n (Compounds at the bottom of the containment hierarchy) matching this name\n will be considered part of the rigid body.\n\n Parameters\n ----------\n discrete_bodies : str or list of str, optional, default=None\n Name(s) of Compound instances to be treated as unique rigid bodies.\n Compound instances matching this (these) name(s) will be provided\n with unique rigid_ids\n rigid_particles : str or list of str, optional, default=None\n Name(s) of Compound instances at the bottom of the containment\n hierarchy (Particles) to be included in rigid bodies. Only Particles\n matching this (these) name(s) will have their rigid_ids altered to\n match the rigid body number.\n\n Examples\n --------\n Creating a rigid benzene\n\n >>> import mbuild as mb\n >>> from mbuild.utils.io import get_fn\n >>> benzene = mb.load(get_fn('benzene.mol2'))\n >>> benzene.label_rigid_bodies()\n\n Creating a semi-rigid benzene, where only the carbons are treated as\n a rigid body\n\n >>> import mbuild as mb\n >>> from mbuild.utils.io import get_fn\n >>> benzene = mb.load(get_fn('benzene.mol2'))\n >>> benzene.label_rigid_bodies(rigid_particles='C')\n\n Create a box of rigid benzenes, where each benzene has a unique rigid\n body ID.\n\n >>> import mbuild as mb\n >>> from mbuild.utils.io import get_fn\n >>> benzene = mb.load(get_fn('benzene.mol2'))\n >>> benzene.name = 'Benzene'\n >>> filled = mb.fill_box(benzene,\n ... n_compounds=10,\n ... box=[0, 0, 0, 4, 4, 4])\n >>> filled.label_rigid_bodies(distinct_bodies='Benzene')\n\n Create a box of semi-rigid benzenes, where each benzene has a unique\n rigid body ID and only the carbon portion is treated as rigid.\n\n >>> import mbuild as mb\n >>> from mbuild.utils.io import get_fn\n >>> benzene = mb.load(get_fn('benzene.mol2'))\n >>> benzene.name = 'Benzene'\n >>> filled = mb.fill_box(benzene,\n ... n_compounds=10,\n ... box=[0, 0, 0, 4, 4, 4])\n >>> filled.label_rigid_bodies(distinct_bodies='Benzene',\n ... 
rigid_particles='C')\n\n \"\"\"\n if discrete_bodies is not None:\n if isinstance(discrete_bodies, string_types):\n discrete_bodies = [discrete_bodies]\n if rigid_particles is not None:\n if isinstance(rigid_particles, string_types):\n rigid_particles = [rigid_particles]\n\n if self.root.max_rigid_id is not None:\n rigid_id = self.root.max_rigid_id + 1\n warn(\"{} rigid bodies already exist. Incrementing 'rigid_id'\"\n \"starting from {}.\".format(rigid_id, rigid_id))\n else:\n rigid_id = 0\n\n for successor in self.successors():\n if discrete_bodies and successor.name not in discrete_bodies:\n continue\n for particle in successor.particles():\n if rigid_particles and particle.name not in rigid_particles:\n continue\n particle.rigid_id = rigid_id\n if discrete_bodies:\n rigid_id += 1\n\n def unlabel_rigid_bodies(self):\n \"\"\"Remove all rigid body labels from the Compound \"\"\"\n self._check_if_contains_rigid_bodies = True\n for child in self.children:\n child._check_if_contains_rigid_bodies = True\n for particle in self.particles():\n particle.rigid_id = None\n\n def _increment_rigid_ids(self, increment):\n \"\"\"Increment the rigid_id of all rigid Particles in a Compound\n\n Adds `increment` to the rigid_id of all Particles in `self` that\n already have an integer rigid_id.\n \"\"\"\n for particle in self.particles():\n if particle.rigid_id is not None:\n particle.rigid_id += increment\n\n def _reorder_rigid_ids(self):\n \"\"\"Reorder rigid body IDs ensuring consecutiveness.\n\n Primarily used internally to ensure consecutive rigid_ids following\n removal of a Compound.\n\n \"\"\"\n max_rigid = self.max_rigid_id\n unique_rigid_ids = sorted(\n set([p.rigid_id for p in self.rigid_particles()]))\n n_unique_rigid = len(unique_rigid_ids)\n if max_rigid and n_unique_rigid != max_rigid + 1:\n missing_rigid_id = (\n unique_rigid_ids[-1] * (unique_rigid_ids[-1] + 1)) / 2 - sum(unique_rigid_ids)\n for successor in self.successors():\n if successor.rigid_id is not None:\n if successor.rigid_id > missing_rigid_id:\n successor.rigid_id -= 1\n if self.rigid_id:\n if self.rigid_id > missing_rigid_id:\n self.rigid_id -= 1\n\n def add(self, new_child, label=None, containment=True, replace=False,\n inherit_periodicity=True, reset_rigid_ids=True):\n \"\"\"Add a part to the Compound.\n\n Note:\n This does not necessarily add the part to self.children but may\n instead be used to add a reference to the part to self.labels. See\n 'containment' argument.\n\n Parameters\n ----------\n new_child : mb.Compound or list-like of mb.Compound\n The object(s) to be added to this Compound.\n label : str, optional\n A descriptive string for the part.\n containment : bool, optional, default=True\n Add the part to self.children.\n replace : bool, optional, default=True\n Replace the label if it already exists.\n inherit_periodicity : bool, optional, default=True\n Replace the periodicity of self with the periodicity of the\n Compound being added\n reset_rigid_ids : bool, optional, default=True\n If the Compound to be added contains rigid bodies, reset the\n rigid_ids such that values remain distinct from rigid_ids\n already present in `self`. 
Can be set to False if attempting\n to add Compounds to an existing rigid body.\n\n \"\"\"\n # Support batch add via lists, tuples and sets.\n if (isinstance(new_child, collections.Iterable) and\n not isinstance(new_child, string_types)):\n for child in new_child:\n self.add(child, reset_rigid_ids=reset_rigid_ids)\n return\n\n if not isinstance(new_child, Compound):\n raise ValueError('Only objects that inherit from mbuild.Compound '\n 'can be added to Compounds. You tried to add '\n '\"{}\".'.format(new_child))\n\n if new_child.contains_rigid or new_child.rigid_id is not None:\n if self.contains_rigid and reset_rigid_ids:\n new_child._increment_rigid_ids(increment=self.max_rigid_id + 1)\n self._check_if_contains_rigid_bodies = True\n if self.rigid_id is not None:\n self.rigid_id = None\n\n # Create children and labels on the first add operation\n if self.children is None:\n self.children = OrderedSet()\n if self.labels is None:\n self.labels = OrderedDict()\n\n if containment:\n if new_child.parent is not None:\n raise MBuildError('Part {} already has a parent: {}'.format(\n new_child, new_child.parent))\n self.children.add(new_child)\n new_child.parent = self\n\n if new_child.bond_graph is not None:\n if self.root.bond_graph is None:\n self.root.bond_graph = new_child.bond_graph\n else:\n self.root.bond_graph.compose(new_child.bond_graph)\n\n new_child.bond_graph = None\n\n # Add new_part to labels. Does not currently support batch add.\n if label is None:\n label = '{0}[$]'.format(new_child.__class__.__name__)\n\n if label.endswith('[$]'):\n label = label[:-3]\n if label not in self.labels:\n self.labels[label] = []\n label_pattern = label + '[{}]'\n\n count = len(self.labels[label])\n self.labels[label].append(new_child)\n label = label_pattern.format(count)\n\n if not replace and label in self.labels:\n raise MBuildError('Label \"{0}\" already exists in {1}.'.format(\n label, self))\n else:\n self.labels[label] = new_child\n new_child.referrers.add(self)\n\n if (inherit_periodicity and isinstance(new_child, Compound) and\n new_child.periodicity.any()):\n self.periodicity = new_child.periodicity\n\n def remove(self, objs_to_remove):\n \"\"\"Remove children from the Compound.\n\n Parameters\n ----------\n objs_to_remove : mb.Compound or list of mb.Compound\n The Compound(s) to be removed from self\n\n \"\"\"\n if not self.children:\n return\n\n if not hasattr(objs_to_remove, '__iter__'):\n objs_to_remove = [objs_to_remove]\n objs_to_remove = set(objs_to_remove)\n\n if len(objs_to_remove) == 0:\n return\n\n remove_from_here = objs_to_remove.intersection(self.children)\n self.children -= remove_from_here\n yet_to_remove = objs_to_remove - remove_from_here\n\n for removed in remove_from_here:\n for child in removed.children:\n removed.remove(child)\n\n for removed_part in remove_from_here:\n if removed_part.rigid_id is not None:\n for ancestor in removed_part.ancestors():\n ancestor._check_if_contains_rigid_bodies = True\n if self.root.bond_graph and self.root.bond_graph.has_node(\n removed_part):\n for neighbor in self.root.bond_graph.neighbors(removed_part):\n self.root.remove_bond((removed_part, neighbor))\n self.root.bond_graph.remove_node(removed_part)\n self._remove_references(removed_part)\n\n # Remove the part recursively from sub-compounds.\n for child in self.children:\n child.remove(yet_to_remove)\n if child.contains_rigid:\n self.root._reorder_rigid_ids()\n\n def _remove_references(self, removed_part):\n \"\"\"Remove labels pointing to this part and vice versa. 
\"\"\"\n removed_part.parent = None\n\n # Remove labels in the hierarchy pointing to this part.\n referrers_to_remove = set()\n for referrer in removed_part.referrers:\n if removed_part not in referrer.ancestors():\n for label, referred_part in list(referrer.labels.items()):\n if referred_part is removed_part:\n del referrer.labels[label]\n referrers_to_remove.add(referrer)\n removed_part.referrers -= referrers_to_remove\n\n # Remove labels in this part pointing into the hierarchy.\n labels_to_delete = []\n if isinstance(removed_part, Compound):\n for label, part in list(removed_part.labels.items()):\n if not isinstance(part, Compound):\n for p in part:\n self._remove_references(p)\n elif removed_part not in part.ancestors():\n try:\n part.referrers.discard(removed_part)\n except KeyError:\n pass\n else:\n labels_to_delete.append(label)\n for label in labels_to_delete:\n removed_part.labels.pop(label, None)\n\n def referenced_ports(self):\n \"\"\"Return all Ports referenced by this Compound.\n\n Returns\n -------\n list of mb.Compound\n A list of all ports referenced by the Compound\n\n \"\"\"\n from mbuild.port import Port\n return [port for port in self.labels.values()\n if isinstance(port, Port)]\n\n def all_ports(self):\n \"\"\"Return all Ports referenced by this Compound and its successors\n\n Returns\n -------\n list of mb.Compound\n A list of all Ports referenced by this Compound and its successors\n\n \"\"\"\n from mbuild.port import Port\n return [successor for successor in self.successors()\n if isinstance(successor, Port)]\n\n def available_ports(self):\n \"\"\"Return all unoccupied Ports referenced by this Compound.\n\n Returns\n -------\n list of mb.Compound\n A list of all unoccupied ports referenced by the Compound\n\n \"\"\"\n from mbuild.port import Port\n return [port for port in self.labels.values()\n if isinstance(port, Port) and not port.used]\n\n def bonds(self):\n \"\"\"Return all bonds in the Compound and sub-Compounds.\n\n Yields\n -------\n tuple of mb.Compound\n The next bond in the Compound\n\n See Also\n --------\n bond_graph.edges_iter : Iterates over all edges in a BondGraph\n\n \"\"\"\n if self.root.bond_graph:\n if self.root == self:\n return self.root.bond_graph.edges_iter()\n else:\n return self.root.bond_graph.subgraph(\n self.particles()).edges_iter()\n else:\n return iter(())\n\n @property\n def n_bonds(self):\n \"\"\"Return the number of bonds in the Compound.\n\n Returns\n -------\n int\n The number of bonds in the Compound\n\n \"\"\"\n return sum(1 for _ in self.bonds())\n\n def add_bond(self, particle_pair):\n \"\"\"Add a bond between two Particles.\n\n Parameters\n ----------\n particle_pair : indexable object, length=2, dtype=mb.Compound\n The pair of Particles to add a bond between\n\n \"\"\"\n if self.root.bond_graph is None:\n self.root.bond_graph = BondGraph()\n\n self.root.bond_graph.add_edge(particle_pair[0], particle_pair[1])\n\n def generate_bonds(self, name_a, name_b, dmin, dmax):\n \"\"\"Add Bonds between all pairs of types a/b within [dmin, dmax].\n\n Parameters\n ----------\n name_a : str\n The name of one of the Particles to be in each bond\n name_b : str\n The name of the other Particle to be in each bond\n dmin : float\n The minimum distance between Particles for considering a bond\n dmax : float\n The maximum distance between Particles for considering a bond\n\n \"\"\"\n particle_kdtree = PeriodicCKDTree(\n data=self.xyz, bounds=self.periodicity)\n particle_array = np.array(list(self.particles()))\n added_bonds = list()\n for p1 
in self.particles_by_name(name_a):\n nearest = self.particles_in_range(p1, dmax, max_particles=20,\n particle_kdtree=particle_kdtree,\n particle_array=particle_array)\n for p2 in nearest:\n if p2 == p1:\n continue\n bond_tuple = (p1, p2) if id(p1) < id(p2) else (p2, p1)\n if bond_tuple in added_bonds:\n continue\n min_dist = self.min_periodic_distance(p2.pos, p1.pos)\n if (p2.name == name_b) and (dmin <= min_dist <= dmax):\n self.add_bond((p1, p2))\n added_bonds.append(bond_tuple)\n\n def remove_bond(self, particle_pair):\n \"\"\"Deletes a bond between a pair of Particles\n\n Parameters\n ----------\n particle_pair : indexable object, length=2, dtype=mb.Compound\n The pair of Particles to remove the bond between\n\n \"\"\"\n from mbuild.port import Port\n if self.root.bond_graph is None or not self.root.bond_graph.has_edge(\n *particle_pair):\n warn(\"Bond between {} and {} doesn't exist!\".format(*particle_pair))\n return\n self.root.bond_graph.remove_edge(*particle_pair)\n bond_vector = particle_pair[0].pos - particle_pair[1].pos\n if np.allclose(bond_vector, np.zeros(3)):\n warn(\"Particles {} and {} overlap! Ports will not be added.\"\n \"\".format(*particle_pair))\n return\n distance = np.linalg.norm(bond_vector)\n particle_pair[0].parent.add(Port(anchor=particle_pair[0],\n orientation=-bond_vector,\n separation=distance / 2), 'port[$]')\n particle_pair[1].parent.add(Port(anchor=particle_pair[1],\n orientation=bond_vector,\n separation=distance / 2), 'port[$]')\n\n @property\n def pos(self):\n if not self.children:\n return self._pos\n else:\n return self.center\n\n @pos.setter\n def pos(self, value):\n if not self.children:\n self._pos = value\n else:\n raise MBuildError('Cannot set position on a Compound that has'\n ' children.')\n\n @property\n def periodicity(self):\n return self._periodicity\n\n @periodicity.setter\n def periodicity(self, periods):\n self._periodicity = np.array(periods)\n\n @property\n def xyz(self):\n \"\"\"Return all particle coordinates in this compound.\n\n Returns\n -------\n pos : np.ndarray, shape=(n, 3), dtype=float\n Array with the positions of all particles.\n \"\"\"\n if not self.children:\n pos = np.expand_dims(self._pos, axis=0)\n else:\n arr = np.fromiter(itertools.chain.from_iterable(\n particle.pos for particle in self.particles()), dtype=float)\n pos = arr.reshape((-1, 3))\n return pos\n\n @property\n def xyz_with_ports(self):\n \"\"\"Return all particle coordinates in this compound including ports.\n\n Returns\n -------\n pos : np.ndarray, shape=(n, 3), dtype=float\n Array with the positions of all particles and ports.\n\n \"\"\"\n if not self.children:\n pos = self._pos\n else:\n arr = np.fromiter(\n itertools.chain.from_iterable(\n particle.pos for particle in self.particles(\n include_ports=True)), dtype=float)\n pos = arr.reshape((-1, 3))\n return pos\n\n @xyz.setter\n def xyz(self, arrnx3):\n \"\"\"Set the positions of the particles in the Compound, excluding the Ports.\n\n This function does not set the position of the ports.\n\n Parameters\n ----------\n arrnx3 : np.ndarray, shape=(n,3), dtype=float\n The new particle positions\n\n \"\"\"\n if not self.children:\n if not arrnx3.shape[0] == 1:\n raise ValueError(\n 'Trying to set position of {} with more than one'\n 'coordinate: {}'.format(\n self, arrnx3))\n self.pos = np.squeeze(arrnx3)\n else:\n for atom, coords in zip(\n self._particles(\n include_ports=False), arrnx3):\n atom.pos = coords\n\n @xyz_with_ports.setter\n def xyz_with_ports(self, arrnx3):\n \"\"\"Set the positions of the 
particles in the Compound, including the Ports.\n\n Parameters\n ----------\n arrnx3 : np.ndarray, shape=(n,3), dtype=float\n The new particle positions\n\n \"\"\"\n if not self.children:\n if not arrnx3.shape[0] == 1:\n raise ValueError(\n 'Trying to set position of {} with more than one'\n 'coordinate: {}'.format(\n self, arrnx3))\n self.pos = np.squeeze(arrnx3)\n else:\n for atom, coords in zip(\n self._particles(\n include_ports=True), arrnx3):\n atom.pos = coords\n\n @property\n def center(self):\n \"\"\"The cartesian center of the Compound based on its Particles.\n\n Returns\n -------\n np.ndarray, shape=(3,), dtype=float\n The cartesian center of the Compound based on its Particles\n\n \"\"\"\n\n if np.all(np.isfinite(self.xyz)):\n return np.mean(self.xyz, axis=0)\n\n @property\n def boundingbox(self):\n \"\"\"Compute the bounding box of the compound.\n\n Returns\n -------\n mb.Box\n The bounding box for this Compound\n\n \"\"\"\n xyz = self.xyz\n return Box(mins=xyz.min(axis=0), maxs=xyz.max(axis=0))\n\n def min_periodic_distance(self, xyz0, xyz1):\n \"\"\"Vectorized distance calculation considering minimum image.\n\n Parameters\n ----------\n xyz0 : np.ndarray, shape=(3,), dtype=float\n Coordinates of first point\n xyz1 : np.ndarray, shape=(3,), dtype=float\n Coordinates of second point\n\n Returns\n -------\n float\n Vectorized distance between the two points following minimum\n image convention\n\n \"\"\"\n d = np.abs(xyz0 - xyz1)\n d = np.where(d > 0.5 * self.periodicity, self.periodicity - d, d)\n return np.sqrt((d ** 2).sum(axis=-1))\n\n def particles_in_range(\n self,\n compound,\n dmax,\n max_particles=20,\n particle_kdtree=None,\n particle_array=None):\n \"\"\"Find particles within a specified range of another particle.\n\n Parameters\n ----------\n compound : mb.Compound\n Reference particle to find other particles in range of\n dmax : float\n Maximum distance from 'compound' to look for Particles\n max_particles : int, optional, default=20\n Maximum number of Particles to return\n particle_kdtree : mb.PeriodicCKDTree, optional\n KD-tree for looking up nearest neighbors. If not provided, a KD-\n tree will be generated from all Particles in self\n particle_array : np.ndarray, shape=(n,), dtype=mb.Compound, optional\n Array of possible particles to consider for return. 
If not\n provided, this defaults to all Particles in self\n\n Returns\n -------\n np.ndarray, shape=(n,), dtype=mb.Compound\n Particles in range of compound according to user-defined limits\n\n See Also\n --------\n periodic_kdtree.PerioidicCKDTree : mBuild implementation of kd-trees\n scipy.spatial.ckdtree : Further details on kd-trees\n\n \"\"\"\n if particle_kdtree is None:\n particle_kdtree = PeriodicCKDTree(\n data=self.xyz, bounds=self.periodicity)\n _, idxs = particle_kdtree.query(\n compound.pos, k=max_particles, distance_upper_bound=dmax)\n idxs = idxs[idxs != self.n_particles]\n if particle_array is None:\n particle_array = np.array(list(self.particles()))\n return particle_array[idxs]\n\n def visualize(self, show_ports=False, \n backend='py3dmol', color_scheme={}): # pragma: no cover\n \"\"\"Visualize the Compound using py3dmol (default) or nglview.\n\n Allows for visualization of a Compound within a Jupyter Notebook.\n\n Parameters\n ----------\n show_ports : bool, optional, default=False\n Visualize Ports in addition to Particles\n backend : str, optional, default='py3dmol'\n Specify the backend package to visualize compounds\n Currently supported: py3dmol, nglview\n color_scheme : dict, optional\n Specify coloring for non-elemental particles\n keys are strings of the particle names\n values are strings of the colors\n i.e. {'_CGBEAD': 'blue'}\n\n \"\"\"\n viz_pkg = {'nglview': self._visualize_nglview,\n 'py3dmol': self._visualize_py3dmol}\n if run_from_ipython():\n if backend.lower() in viz_pkg:\n return viz_pkg[backend.lower()](show_ports=show_ports, \n color_scheme=color_scheme)\n else:\n raise RuntimeError(\"Unsupported visualization \" +\n \"backend ({}). \".format(backend) +\n \"Currently supported backends include nglview and py3dmol\")\n \n else:\n raise RuntimeError('Visualization is only supported in Jupyter '\n 'Notebooks.')\n\n def _visualize_py3dmol(self, show_ports=False, color_scheme={}):\n \"\"\"Visualize the Compound using py3Dmol.\n\n Allows for visualization of a Compound within a Jupyter Notebook.\n\n Parameters\n ----------\n show_ports : bool, optional, default=False\n Visualize Ports in addition to Particles\n color_scheme : dict, optional\n Specify coloring for non-elemental particles\n keys are strings of the particle names\n values are strings of the colors\n i.e. 
{'_CGBEAD': 'blue'}\n\n\n Returns\n ------\n view : py3Dmol.view\n\n \"\"\"\n py3Dmol = import_('py3Dmol')\n remove_digits = lambda x: ''.join(i for i in x if not i.isdigit()\n or i == '_')\n\n modified_color_scheme = {}\n for name, color in color_scheme.items():\n # Py3dmol does some element string conversions, \n # first character is as-is, rest of the characters are lowercase\n new_name = name[0] + name[1:].lower() \n modified_color_scheme[new_name] = color\n modified_color_scheme[name] = color\n\n for particle in self.particles():\n particle.name = remove_digits(particle.name).upper()\n if not particle.name:\n particle.name = 'UNK'\n tmp_dir = tempfile.mkdtemp()\n self.save(os.path.join(tmp_dir, 'tmp.mol2'),\n show_ports=show_ports,\n overwrite=True)\n\n view = py3Dmol.view()\n view.addModel(open(os.path.join(tmp_dir, 'tmp.mol2'), 'r').read(),\n 'mol2', keepH=True)\n view.setStyle({'stick': {'radius': 0.2,\n 'color':'grey'},\n 'sphere': {'scale': 0.3,\n 'colorscheme':modified_color_scheme}})\n view.zoomTo()\n\n return view\n\n def _visualize_nglview(self, show_ports=False, color_scheme={}):\n \"\"\"Visualize the Compound using nglview.\n\n Allows for visualization of a Compound within a Jupyter Notebook.\n\n Parameters\n ----------\n show_ports : bool, optional, default=False\n Visualize Ports in addition to Particles\n \"\"\"\n nglview = import_('nglview')\n from mdtraj.geometry.sasa import _ATOMIC_RADII\n remove_digits = lambda x: ''.join(i for i in x if not i.isdigit()\n or i == '_')\n for particle in self.particles():\n particle.name = remove_digits(particle.name).upper()\n if not particle.name:\n particle.name = 'UNK'\n tmp_dir = tempfile.mkdtemp()\n self.save(os.path.join(tmp_dir, 'tmp.mol2'),\n show_ports=show_ports,\n overwrite=True)\n widget = nglview.show_file(os.path.join(tmp_dir, 'tmp.mol2'))\n widget.clear()\n widget.add_ball_and_stick(cylinderOnly=True)\n elements = set([particle.name for particle in self.particles()])\n scale = 50.0\n for element in elements:\n try:\n widget.add_ball_and_stick('_{}'.format(\n element.upper()), aspect_ratio=_ATOMIC_RADII[element.title()]**1.5 * scale)\n except KeyError:\n ids = [str(i) for i, particle in enumerate(self.particles())\n if particle.name == element]\n widget.add_ball_and_stick(\n '@{}'.format(\n ','.join(ids)),\n aspect_ratio=0.17**1.5 * scale,\n color='grey')\n if show_ports:\n widget.add_ball_and_stick('_VS',\n aspect_ratio=1.0, color='#991f00')\n return widget\n\n def update_coordinates(self, filename, update_port_locations=True):\n \"\"\"Update the coordinates of this Compound from a file.\n\n Parameters\n ----------\n filename : str\n Name of file from which to load coordinates. Supported file types\n are the same as those supported by load()\n update_port_locations : bool, optional, default=True\n Update the locations of Ports so that they are shifted along with\n their anchor particles. 
Note: This conserves the location of\n Ports with respect to the anchor Particle, but does not conserve\n the orientation of Ports with respect to the molecule as a whole.\n\n See Also\n --------\n load : Load coordinates from a file\n\n \"\"\"\n if update_port_locations:\n xyz_init = self.xyz\n self = load(filename, compound=self, coords_only=True)\n self._update_port_locations(xyz_init)\n else:\n self = load(filename, compound=self, coords_only=True)\n\n def _update_port_locations(self, initial_coordinates):\n \"\"\"Adjust port locations after particles have moved\n\n Compares the locations of Particles between 'self' and an array of\n reference coordinates. Shifts Ports in accordance with how far anchors\n have been moved. This conserves the location of Ports with respect to\n their anchor Particles, but does not conserve the orientation of Ports\n with respect to the molecule as a whole.\n\n Parameters\n ----------\n initial_coordinates : np.ndarray, shape=(n, 3), dtype=float\n Reference coordinates to use for comparing how far anchor Particles\n have shifted.\n\n \"\"\"\n particles = list(self.particles())\n for port in self.all_ports():\n if port.anchor:\n idx = particles.index(port.anchor)\n shift = particles[idx].pos - initial_coordinates[idx]\n port.translate(shift)\n\n def _kick(self):\n \"\"\"Slightly adjust all coordinates in a Compound\n\n Provides a slight adjustment to coordinates to kick them out of local\n energy minima.\n \"\"\"\n xyz_init = self.xyz\n for particle in self.particles():\n particle.pos += (np.random.rand(3,) - 0.5) / 100\n self._update_port_locations(xyz_init)\n\n warning_message = 'Please use Compound.energy_minimize()'\n\n @deprecated(warning_message)\n def energy_minimization(self, forcefield='UFF', steps=1000, **kwargs):\n self.energy_minimize(forcefield=forcefield, steps=steps, **kwargs)\n\n def energy_minimize(self, forcefield='UFF', steps=1000, **kwargs):\n \"\"\"Perform an energy minimization on a Compound\n\n Default beahvior utilizes Open Babel (http://openbabel.org/docs/dev/)\n to perform an energy minimization/geometry optimization on a\n Compound by applying a generic force field\n\n Can also utilize OpenMM (http://openmm.org/) to energy minimize\n after atomtyping a Compound using\n Foyer (https://github.com/mosdef-hub/foyer) to apply a forcefield\n XML file that contains valid SMARTS strings.\n\n This function is primarily intended to be used on smaller components,\n with sizes on the order of 10's to 100's of particles, as the energy\n minimization scales poorly with the number of particles.\n\n Parameters\n ----------\n steps : int, optional, default=1000\n The number of optimization iterations\n forcefield : str, optional, default='UFF'\n The generic force field to apply to the Compound for minimization.\n Valid options are 'MMFF94', 'MMFF94s', ''UFF', 'GAFF', and 'Ghemical'.\n Please refer to the Open Babel documentation (http://open-babel.\n readthedocs.io/en/latest/Forcefields/Overview.html) when considering\n your choice of force field.\n Utilizing OpenMM for energy minimization requires a forcefield\n XML file with valid SMARTS strings. Please refer to (http://docs.\n openmm.org/7.0.0/userguide/application.html#creating-force-fields)\n for more information.\n\n\n Keyword Arguments\n ------------\n algorithm : str, optional, default='cg'\n The energy minimization algorithm. 
Valid options are 'steep',\n 'cg', and 'md', corresponding to steepest descent, conjugate\n gradient, and equilibrium molecular dynamics respectively.\n For _energy_minimize_openbabel\n scale_bonds : float, optional, default=1\n Scales the bond force constant (1 is completely on).\n For _energy_minimize_openmm\n scale_angles : float, optional, default=1\n Scales the angle force constant (1 is completely on)\n For _energy_minimize_openmm\n scale_torsions : float, optional, default=1\n Scales the torsional force constants (1 is completely on)\n For _energy_minimize_openmm\n Note: Only Ryckaert-Bellemans style torsions are currently supported \n scale_nonbonded : float, optional, default=1\n Scales epsilon (1 is completely on)\n For _energy_minimize_openmm\n\n References\n ----------\n If using _energy_minimize_openmm(), please cite:\n .. [1] P. Eastman, M. S. Friedrichs, J. D. Chodera, R. J. Radmer,\n C. M. Bruns, J. P. Ku, K. A. Beauchamp, T. J. Lane,\n L.-P. Wang, D. Shukla, T. Tye, M. Houston, T. Stich,\n C. Klein, M. R. Shirts, and V. S. Pande.\n \"OpenMM 4: A Reusable, Extensible, Hardware Independent\n Library for High Performance Molecular Simulation.\"\n J. Chem. Theor. Comput. 9(1): 461-469. (2013).\n\n\n If using _energy_minimize_openbabel(), please cite:\n .. [1] O'Boyle, N.M.; Banck, M.; James, C.A.; Morley, C.;\n Vandermeersch, T.; Hutchison, G.R. \"Open Babel: An open\n chemical toolbox.\" (2011) J. Cheminf. 3, 33\n\n .. [2] Open Babel, version X.X.X http://openbabel.org, (installed\n Month Year)\n\n If using the 'MMFF94' force field please also cite the following:\n .. [3] T.A. Halgren, \"Merck molecular force field. I. Basis, form,\n scope, parameterization, and performance of MMFF94.\" (1996)\n J. Comput. Chem. 17, 490-519\n .. [4] T.A. Halgren, \"Merck molecular force field. II. MMFF94 van der\n Waals and electrostatic parameters for intermolecular\n interactions.\" (1996) J. Comput. Chem. 17, 520-552\n .. [5] T.A. Halgren, \"Merck molecular force field. III. Molecular\n geometries and vibrational frequencies for MMFF94.\" (1996)\n J. Comput. Chem. 17, 553-586\n .. [6] T.A. Halgren and R.B. Nachbar, \"Merck molecular force field.\n IV. Conformational energies and geometries for MMFF94.\" (1996)\n J. Comput. Chem. 17, 587-615\n .. [7] T.A. Halgren, \"Merck molecular force field. V. Extension of\n MMFF94 using experimental data, additional computational data,\n and empirical rules.\" (1996) J. Comput. Chem. 17, 616-641\n\n If using the 'MMFF94s' force field please cite the above along with:\n .. [8] T.A. Halgren, \"MMFF VI. MMFF94s option for energy minimization\n studies.\" (1999) J. Comput. Chem. 20, 720-729\n\n If using the 'UFF' force field please cite the following:\n .. [3] Rappe, A.K., Casewit, C.J., Colwell, K.S., Goddard, W.A. III,\n Skiff, W.M. \"UFF, a full periodic table force field for\n molecular mechanics and molecular dynamics simulations.\" (1992)\n J. Am. Chem. Soc. 114, 10024-10039\n\n If using the 'GAFF' force field please cite the following:\n .. [3] Wang, J., Wolf, R.M., Caldwell, J.W., Kollman, P.A., Case, D.A.\n \"Development and testing of a general AMBER force field\" (2004)\n J. Comput. Chem. 25, 1157-1174\n\n If using the 'Ghemical' force field please cite the following:\n .. [3] T. Hassinen and M. Perakyla, \"New energy terms for reduced\n protein models implemented in an off-lattice force field\" (2001)\n J. Comput. Chem. 
22, 1229-1242\n\n\n\n \"\"\"\n tmp_dir = tempfile.mkdtemp()\n original = clone(self)\n self._kick()\n self.save(os.path.join(tmp_dir, 'un-minimized.mol2'))\n extension = os.path.splitext(forcefield)[-1]\n openbabel_ffs = ['MMFF94', 'MMFF94s', 'UFF', 'GAFF', 'Ghemical']\n if forcefield in openbabel_ffs:\n self._energy_minimize_openbabel(tmp_dir, forcefield=forcefield,\n steps=steps, **kwargs)\n elif extension == '.xml':\n self._energy_minimize_openmm(tmp_dir, forcefield_files=forcefield,\n forcefield_name=None,\n steps=steps, **kwargs)\n else:\n self._energy_minimize_openmm(tmp_dir, forcefield_files=None,\n forcefield_name=forcefield,\n steps=steps, **kwargs)\n\n self.update_coordinates(os.path.join(tmp_dir, 'minimized.pdb'))\n\n def _energy_minimize_openmm(\n self,\n tmp_dir,\n forcefield_files=None,\n forcefield_name=None,\n steps=1000,\n scale_bonds=1,\n scale_angles=1,\n scale_torsions=1,\n scale_nonbonded=1):\n \"\"\" Perform energy minimization using OpenMM\n\n Converts an mBuild Compound to a Parmed Structure,\n applies a forcefield using Foyer, and creates an OpenMM System.\n\n Parameters\n ----------\n forcefield_files : str or list of str, optional, default=None\n Forcefield files to load \n forcefield_name : str, optional, default=None\n Apply a named forcefield to the output file using the `foyer`\n package, e.g. 'oplsaa'. Forcefields listed here:\n https://github.com/mosdef-hub/foyer/tree/master/foyer/forcefields\n steps : int, optional, default=1000\n Number of energy minimization iterations\n scale_bonds : float, optional, default=1\n Scales the bond force constant (1 is completely on)\n scale_angles : float, optiona, default=1\n Scales the angle force constant (1 is completely on)\n scale_torsions : float, optional, default=1\n Scales the torsional force constants (1 is completely on)\n scale_nonbonded : float, optional, default=1\n Scales epsilon (1 is completely on)\n\n\n Notes\n -----\n Assumes a particular organization for the force groups\n (HarmonicBondForce, HarmonicAngleForce, RBTorsionForce, NonBondedForce)\n\n References\n ----------\n\n .. [1] P. Eastman, M. S. Friedrichs, J. D. Chodera, R. J. Radmer,\n C. M. Bruns, J. P. Ku, K. A. Beauchamp, T. J. Lane,\n L.-P. Wang, D. Shukla, T. Tye, M. Houston, T. Stich,\n C. Klein, M. R. Shirts, and V. S. Pande.\n \"OpenMM 4: A Reusable, Extensible, Hardware Independent\n Library for High Performance Molecular Simulation.\"\n J. Chem. Theor. Comput. 9(1): 461-469. 
(2013).\n\n\n\n \"\"\"\n foyer = import_('foyer')\n\n to_parmed = self.to_parmed()\n ff = foyer.Forcefield(forcefield_files=forcefield_files, name=forcefield_name)\n to_parmed = ff.apply(to_parmed)\n\n from simtk.openmm.app.simulation import Simulation\n from simtk.openmm.app.pdbreporter import PDBReporter\n from simtk.openmm.openmm import LangevinIntegrator\n import simtk.unit as u\n\n system = to_parmed.createSystem()\n integrator = LangevinIntegrator(298 * u.kelvin, 1 / u.picosecond,\n 0.002 * u.picoseconds)\n simulation = Simulation(to_parmed.topology, system, integrator)\n\n for force in system.getForces():\n if type(force).__name__ == \"HarmonicBondForce\":\n for bond_index in range(force.getNumBonds()):\n atom1, atom2, r0, k = force.getBondParameters(bond_index)\n force.setBondParameters(bond_index,\n atom1, atom2,\n r0, k * scale_bonds)\n force.updateParametersInContext(simulation.context)\n\n elif type(force).__name__ == \"HarmonicAngleForce\":\n for angle_index in range(force.getNumAngles()):\n atom1, atom2, atom3, r0, k = force.getAngleParameters(\n angle_index)\n force.setAngleParameters(angle_index,\n atom1, atom2, atom3,\n r0, k * scale_angles)\n force.updateParametersInContext(simulation.context)\n\n elif type(force).__name__ == \"RBTorsionForce\":\n for torsion_index in range(force.getNumTorsions()):\n atom1, atom2, atom3, atom4, c0, c1, c2, c3, c4, c5 = force.getTorsionParameters(\n torsion_index)\n force.setTorsionParameters(\n torsion_index,\n atom1,\n atom2,\n atom3,\n atom4,\n c0 * scale_torsions,\n c1 * scale_torsions,\n c2 * scale_torsions,\n c3 * scale_torsions,\n c4 * scale_torsions,\n c5 * scale_torsions)\n force.updateParametersInContext(simulation.context)\n\n elif type(force).__name__ == \"NonbondedForce\":\n for nb_index in range(force.getNumParticles()):\n charge, sigma, epsilon = force.getParticleParameters(\n nb_index)\n force.setParticleParameters(nb_index,\n charge, sigma,\n epsilon * scale_nonbonded)\n force.updateParametersInContext(simulation.context)\n\n elif type(force).__name__ == \"CMMotionRemover\":\n pass\n\n else:\n warn(\n 'OpenMM Force {} is '\n 'not currently supported in _energy_minimize_openmm. '\n 'This Force will not be updated!'.format(\n type(force).__name__))\n\n simulation.context.setPositions(to_parmed.positions)\n simulation.minimizeEnergy(maxIterations=steps)\n reporter = PDBReporter(os.path.join(tmp_dir, 'minimized.pdb'), 1)\n reporter.report(\n simulation,\n simulation.context.getState(\n getPositions=True))\n\n def _energy_minimize_openbabel(self, tmp_dir, steps=1000, algorithm='cg',\n forcefield='UFF'):\n \"\"\"Perform an energy minimization on a Compound\n\n Utilizes Open Babel (http://openbabel.org/docs/dev/) to perform an\n energy minimization/geometry optimization on a Compound by applying\n a generic force field.\n\n This function is primarily intended to be used on smaller components,\n with sizes on the order of 10's to 100's of particles, as the energy\n minimization scales poorly with the number of particles.\n\n Parameters\n ----------\n steps : int, optionl, default=1000\n The number of optimization iterations\n algorithm : str, optional, default='cg'\n The energy minimization algorithm. 
Valid options are 'steep',\n 'cg', and 'md', corresponding to steepest descent, conjugate\n gradient, and equilibrium molecular dynamics respectively.\n forcefield : str, optional, default='UFF'\n The generic force field to apply to the Compound for minimization.\n Valid options are 'MMFF94', 'MMFF94s', ''UFF', 'GAFF', and 'Ghemical'.\n Please refer to the Open Babel documentation (http://open-babel.\n readthedocs.io/en/latest/Forcefields/Overview.html) when considering\n your choice of force field.\n\n References\n ----------\n .. [1] O'Boyle, N.M.; Banck, M.; James, C.A.; Morley, C.;\n Vandermeersch, T.; Hutchison, G.R. \"Open Babel: An open\n chemical toolbox.\" (2011) J. Cheminf. 3, 33\n .. [2] Open Babel, version X.X.X http://openbabel.org, (installed\n Month Year)\n\n If using the 'MMFF94' force field please also cite the following:\n .. [3] T.A. Halgren, \"Merck molecular force field. I. Basis, form,\n scope, parameterization, and performance of MMFF94.\" (1996)\n J. Comput. Chem. 17, 490-519\n .. [4] T.A. Halgren, \"Merck molecular force field. II. MMFF94 van der\n Waals and electrostatic parameters for intermolecular\n interactions.\" (1996) J. Comput. Chem. 17, 520-552\n .. [5] T.A. Halgren, \"Merck molecular force field. III. Molecular\n geometries and vibrational frequencies for MMFF94.\" (1996)\n J. Comput. Chem. 17, 553-586\n .. [6] T.A. Halgren and R.B. Nachbar, \"Merck molecular force field.\n IV. Conformational energies and geometries for MMFF94.\" (1996)\n J. Comput. Chem. 17, 587-615\n .. [7] T.A. Halgren, \"Merck molecular force field. V. Extension of\n MMFF94 using experimental data, additional computational data,\n and empirical rules.\" (1996) J. Comput. Chem. 17, 616-641\n\n If using the 'MMFF94s' force field please cite the above along with:\n .. [8] T.A. Halgren, \"MMFF VI. MMFF94s option for energy minimization\n studies.\" (1999) J. Comput. Chem. 20, 720-729\n\n If using the 'UFF' force field please cite the following:\n .. [3] Rappe, A.K., Casewit, C.J., Colwell, K.S., Goddard, W.A. III,\n Skiff, W.M. \"UFF, a full periodic table force field for\n molecular mechanics and molecular dynamics simulations.\" (1992)\n J. Am. Chem. Soc. 114, 10024-10039\n\n If using the 'GAFF' force field please cite the following:\n .. [3] Wang, J., Wolf, R.M., Caldwell, J.W., Kollman, P.A., Case, D.A.\n \"Development and testing of a general AMBER force field\" (2004)\n J. Comput. Chem. 25, 1157-1174\n\n If using the 'Ghemical' force field please cite the following:\n .. [3] T. Hassinen and M. Perakyla, \"New energy terms for reduced\n protein models implemented in an off-lattice force field\" (2001)\n J. Comput. Chem. 22, 1229-1242\n \"\"\"\n\n openbabel = import_('openbabel')\n\n for particle in self.particles():\n try:\n get_by_symbol(particle.name)\n except KeyError:\n raise MBuildError(\"Element name {} not recognized. Cannot \"\n \"perform minimization.\"\n \"\".format(particle.name))\n\n obConversion = openbabel.OBConversion()\n obConversion.SetInAndOutFormats(\"mol2\", \"pdb\")\n mol = openbabel.OBMol()\n\n obConversion.ReadFile(mol, os.path.join(tmp_dir, \"un-minimized.mol2\"))\n\n ff = openbabel.OBForceField.FindForceField(forcefield)\n if ff is None:\n raise MBuildError(\"Force field '{}' not supported for energy \"\n \"minimization. Valid force fields are 'MMFF94', \"\n \"'MMFF94s', 'UFF', 'GAFF', and 'Ghemical'.\"\n \"\".format(forcefield))\n warn(\n \"Performing energy minimization using the Open Babel package. 
Please \"\n \"refer to the documentation to find the appropriate citations for \"\n \"Open Babel and the {} force field\".format(forcefield))\n ff.Setup(mol)\n if algorithm == 'steep':\n ff.SteepestDescent(steps)\n elif algorithm == 'md':\n ff.MolecularDynamicsTakeNSteps(steps, 300)\n elif algorithm == 'cg':\n ff.ConjugateGradients(steps)\n else:\n raise MBuildError(\"Invalid minimization algorithm. Valid options \"\n \"are 'steep', 'cg', and 'md'.\")\n ff.UpdateCoordinates(mol)\n\n obConversion.WriteFile(mol, os.path.join(tmp_dir, 'minimized.pdb'))\n\n def save(self, filename, show_ports=False, forcefield_name=None,\n forcefield_files=None, forcefield_debug=False, box=None,\n overwrite=False, residues=None, references_file=None,\n combining_rule='lorentz', foyerkwargs={}, **kwargs):\n \"\"\"Save the Compound to a file.\n\n Parameters\n ----------\n filename : str\n Filesystem path in which to save the trajectory. The extension or\n prefix will be parsed and control the format. Supported\n extensions are: 'hoomdxml', 'gsd', 'gro', 'top', 'lammps', 'lmp'\n show_ports : bool, optional, default=False\n Save ports contained within the compound.\n forcefield_files : str, optional, default=None\n Apply a forcefield to the output file using a forcefield provided\n by the `foyer` package.\n forcefield_name : str, optional, default=None\n Apply a named forcefield to the output file using the `foyer`\n package, e.g. 'oplsaa'. Forcefields listed here:\n https://github.com/mosdef-hub/foyer/tree/master/foyer/forcefields\n forcefield_debug : bool, optional, default=False\n Choose level of verbosity when applying a forcefield through `foyer`.\n Specifically, when missing atom types in the forcefield xml file,\n determine if the warning is condensed or verbose.\n box : mb.Box, optional, default=self.boundingbox (with buffer)\n Box information to be written to the output file. If 'None', a\n bounding box is used with 0.25nm buffers at each face to avoid\n overlapping atoms.\n overwrite : bool, optional, default=False\n Overwrite if the filename already exists\n residues : str of list of str\n Labels of residues in the Compound. Residues are assigned by\n checking against Compound.name.\n references_file : str, optional, default=None\n Specify a filename to write references for the forcefield that is\n to be applied. References are written in BiBTeX format.\n combining_rule : str, optional, default='lorentz'\n Specify the combining rule for nonbonded interactions. Only relevant\n when the `foyer` package is used to apply a forcefield. Valid\n options are 'lorentz' and 'geometric', specifying Lorentz-Berthelot\n and geometric combining rules respectively.\n \n\n Other Parameters\n ----------------\n foyerkwargs : dict, optional\n Specify keyword arguments when applying the foyer Forcefield\n ref_distance : float, optional, default=1.0\n Normalization factor used when saving to .gsd and .hoomdxml formats\n for converting distance values to reduced units.\n ref_energy : float, optional, default=1.0\n Normalization factor used when saving to .gsd and .hoomdxml formats\n for converting energy values to reduced units.\n ref_mass : float, optional, default=1.0\n Normalization factor used when saving to .gsd and .hoomdxml formats\n for converting mass values to reduced units.\n atom_style: str, default='full'\n Defines the style of atoms to be saved in a LAMMPS data file. 
The following atom\n styles are currently supported: 'full', 'atomic', 'charge', 'molecular'\n see http://lammps.sandia.gov/doc/atom_style.html for more\n information on atom styles.\n\n See Also\n --------\n formats.gsdwrite.write_gsd : Write to GSD format\n formats.hoomdxml.write_hoomdxml : Write to Hoomd XML format\n formats.lammpsdata.write_lammpsdata : Write to LAMMPS data format\n\n \"\"\"\n extension = os.path.splitext(filename)[-1]\n if extension == '.xyz':\n traj = self.to_trajectory(show_ports=show_ports)\n traj.save(filename)\n return\n\n # Savers supported by mbuild.formats\n savers = {'.hoomdxml': write_hoomdxml,\n '.gsd': write_gsd,\n '.lammps': write_lammpsdata,\n '.lmp': write_lammpsdata}\n\n try:\n saver = savers[extension]\n except KeyError:\n saver = None\n\n if os.path.exists(filename) and not overwrite:\n raise IOError('{0} exists; not overwriting'.format(filename))\n\n structure = self.to_parmed(box=box, residues=residues,\n show_ports=show_ports)\n # Apply a force field with foyer if specified\n if forcefield_name or forcefield_files:\n foyer = import_('foyer')\n ff = foyer.Forcefield(forcefield_files=forcefield_files,\n name=forcefield_name, debug=forcefield_debug)\n structure = ff.apply(structure, references_file=references_file,\n **foyerkwargs)\n structure.combining_rule = combining_rule\n\n total_charge = sum([atom.charge for atom in structure])\n if round(total_charge, 4) != 0.0:\n warn('System is not charge neutral. Total charge is {}.'\n ''.format(total_charge))\n\n # Provide a warning if rigid_ids are not sequential from 0\n if self.contains_rigid:\n unique_rigid_ids = sorted(set([\n p.rigid_id for p in self.rigid_particles()]))\n if max(unique_rigid_ids) != len(unique_rigid_ids) - 1:\n warn(\"Unique rigid body IDs are not sequential starting from zero.\")\n\n if saver: # mBuild supported saver.\n if extension in ['.gsd', '.hoomdxml']:\n kwargs['rigid_bodies'] = [\n p.rigid_id for p in self.particles()]\n saver(filename=filename, structure=structure, **kwargs)\n else: # ParmEd supported saver.\n structure.save(filename, overwrite=overwrite, **kwargs)\n\n def translate(self, by):\n \"\"\"Translate the Compound by a vector\n\n Parameters\n ----------\n by : np.ndarray, shape=(3,), dtype=float\n\n \"\"\"\n new_positions = _translate(self.xyz_with_ports, by)\n self.xyz_with_ports = new_positions\n\n def translate_to(self, pos):\n \"\"\"Translate the Compound to a specific position\n\n Parameters\n ----------\n pos : np.ndarray, shape=3(,), dtype=float\n\n \"\"\"\n self.translate(pos - self.center)\n\n def rotate(self, theta, around):\n \"\"\"Rotate Compound around an arbitrary vector.\n\n Parameters\n ----------\n theta : float\n The angle by which to rotate the Compound, in radians.\n around : np.ndarray, shape=(3,), dtype=float\n The vector about which to rotate the Compound.\n\n \"\"\"\n new_positions = _rotate(self.xyz_with_ports, theta, around)\n self.xyz_with_ports = new_positions\n\n def spin(self, theta, around):\n \"\"\"Rotate Compound in place around an arbitrary vector.\n\n Parameters\n ----------\n theta : float\n The angle by which to rotate the Compound, in radians.\n around : np.ndarray, shape=(3,), dtype=float\n The axis about which to spin the Compound.\n\n \"\"\"\n around = np.asarray(around).reshape(3)\n center_pos = self.center\n self.translate(-center_pos)\n self.rotate(theta, around)\n self.translate(center_pos)\n\n # Interface to Trajectory for reading/writing .pdb and .mol2 files.\n # 
-----------------------------------------------------------------\n def from_trajectory(self, traj, frame=-1, coords_only=False):\n \"\"\"Extract atoms and bonds from a md.Trajectory.\n\n Will create sub-compounds for every chain if there is more than one\n and sub-sub-compounds for every residue.\n\n Parameters\n ----------\n traj : mdtraj.Trajectory\n The trajectory to load.\n frame : int, optional, default=-1 (last)\n The frame to take coordinates from.\n coords_only : bool, optional, default=False\n Only read coordinate information\n\n \"\"\"\n if coords_only:\n if traj.n_atoms != self.n_particles:\n raise ValueError('Number of atoms in {traj} does not match'\n ' {self}'.format(**locals()))\n atoms_particles = zip(traj.topology.atoms,\n self.particles(include_ports=False))\n if None in self._particles(include_ports=False):\n raise ValueError('Some particles are None')\n for mdtraj_atom, particle in atoms_particles:\n particle.pos = traj.xyz[frame, mdtraj_atom.index]\n return\n\n atom_mapping = dict()\n for chain in traj.topology.chains:\n if traj.topology.n_chains > 1:\n chain_compound = Compound()\n self.add(chain_compound, 'chain[$]')\n else:\n chain_compound = self\n for res in chain.residues:\n for atom in res.atoms:\n new_atom = Particle(name=str(atom.name),\n pos=traj.xyz[frame, atom.index])\n chain_compound.add(\n new_atom, label='{0}[$]'.format(\n atom.name))\n atom_mapping[atom] = new_atom\n\n for mdtraj_atom1, mdtraj_atom2 in traj.topology.bonds:\n atom1 = atom_mapping[mdtraj_atom1]\n atom2 = atom_mapping[mdtraj_atom2]\n self.add_bond((atom1, atom2))\n\n if np.any(traj.unitcell_lengths) and np.any(traj.unitcell_lengths[0]):\n self.periodicity = traj.unitcell_lengths[0]\n else:\n self.periodicity = np.array([0., 0., 0.])\n\n def to_trajectory(self, show_ports=False, chains=None,\n residues=None, box=None):\n \"\"\"Convert to an md.Trajectory and flatten the compound.\n\n Parameters\n ----------\n show_ports : bool, optional, default=False\n Include all port atoms when converting to trajectory.\n chains : mb.Compound or list of mb.Compound\n Chain types to add to the topology\n residues : str of list of str\n Labels of residues in the Compound. Residues are assigned by\n checking against Compound.name.\n box : mb.Box, optional, default=self.boundingbox (with buffer)\n Box information to be used when converting to a `Trajectory`.\n If 'None', a bounding box is used with a 0.5nm buffer in each\n dimension. 
to avoid overlapping atoms, unless `self.periodicity`\n is not None, in which case those values are used for the\n box lengths.\n\n Returns\n -------\n trajectory : md.Trajectory\n\n See also\n --------\n _to_topology\n\n \"\"\"\n atom_list = [particle for particle in self.particles(show_ports)]\n\n top = self._to_topology(atom_list, chains, residues)\n\n # Coordinates.\n xyz = np.ndarray(shape=(1, top.n_atoms, 3), dtype='float')\n for idx, atom in enumerate(atom_list):\n xyz[0, idx] = atom.pos\n\n # Unitcell information.\n unitcell_angles = [90.0, 90.0, 90.0]\n if box is None:\n unitcell_lengths = np.empty(3)\n for dim, val in enumerate(self.periodicity):\n if val:\n unitcell_lengths[dim] = val\n else:\n unitcell_lengths[dim] = self.boundingbox.lengths[dim] + 0.5\n else:\n unitcell_lengths = box.lengths\n unitcell_angles = box.angles\n\n return md.Trajectory(xyz, top, unitcell_lengths=unitcell_lengths,\n unitcell_angles=unitcell_angles)\n\n def _to_topology(self, atom_list, chains=None, residues=None):\n \"\"\"Create a mdtraj.Topology from a Compound.\n\n Parameters\n ----------\n atom_list : list of mb.Compound\n Atoms to include in the topology\n chains : mb.Compound or list of mb.Compound\n Chain types to add to the topology\n residues : str of list of str\n Labels of residues in the Compound. Residues are assigned by\n checking against Compound.name.\n\n Returns\n -------\n top : mdtraj.Topology\n\n See Also\n --------\n mdtraj.Topology : Details on the mdtraj Topology object\n\n \"\"\"\n from mdtraj.core.topology import Topology\n\n if isinstance(chains, string_types):\n chains = [chains]\n if isinstance(chains, (list, set)):\n chains = tuple(chains)\n\n if isinstance(residues, string_types):\n residues = [residues]\n if isinstance(residues, (list, set)):\n residues = tuple(residues)\n top = Topology()\n atom_mapping = {}\n\n default_chain = top.add_chain()\n default_residue = top.add_residue('RES', default_chain)\n\n compound_residue_map = dict()\n atom_residue_map = dict()\n compound_chain_map = dict()\n atom_chain_map = dict()\n\n for atom in atom_list:\n # Chains\n if chains:\n if atom.name in chains:\n current_chain = top.add_chain()\n compound_chain_map[atom] = current_chain\n else:\n for parent in atom.ancestors():\n if chains and parent.name in chains:\n if parent not in compound_chain_map:\n current_chain = top.add_chain()\n compound_chain_map[parent] = current_chain\n current_residue = top.add_residue(\n 'RES', current_chain)\n break\n else:\n current_chain = default_chain\n else:\n current_chain = default_chain\n atom_chain_map[atom] = current_chain\n\n # Residues\n if residues:\n if atom.name in residues:\n current_residue = top.add_residue(atom.name, current_chain)\n compound_residue_map[atom] = current_residue\n else:\n for parent in atom.ancestors():\n if residues and parent.name in residues:\n if parent not in compound_residue_map:\n current_residue = top.add_residue(\n parent.name, current_chain)\n compound_residue_map[parent] = current_residue\n break\n else:\n current_residue = default_residue\n else:\n if chains:\n try: # Grab the default residue from the custom chain.\n current_residue = next(current_chain.residues)\n except StopIteration: # Add the residue to the current chain\n current_residue = top.add_residue('RES', current_chain)\n else: # Grab the default chain's default residue\n current_residue = default_residue\n atom_residue_map[atom] = current_residue\n\n # Add the actual atoms\n try:\n elem = get_by_symbol(atom.name)\n except KeyError:\n elem = 
get_by_symbol(\"VS\")\n at = top.add_atom(atom.name, elem, atom_residue_map[atom])\n at.charge = atom.charge\n atom_mapping[atom] = at\n\n # Remove empty default residues.\n chains_to_remove = [\n chain for chain in top.chains if chain.n_atoms == 0]\n residues_to_remove = [res for res in top.residues if res.n_atoms == 0]\n for chain in chains_to_remove:\n top._chains.remove(chain)\n for res in residues_to_remove:\n for chain in top.chains:\n try:\n chain._residues.remove(res)\n except ValueError: # Already gone.\n pass\n\n for atom1, atom2 in self.bonds():\n # Ensure that both atoms are part of the compound. This becomes an\n # issue if you try to convert a sub-compound to a topology which is\n # bonded to a different subcompound.\n if all(a in atom_mapping.keys() for a in [atom1, atom2]):\n top.add_bond(atom_mapping[atom1], atom_mapping[atom2])\n return top\n\n def from_parmed(self, structure, coords_only=False):\n \"\"\"Extract atoms and bonds from a pmd.Structure.\n\n Will create sub-compounds for every chain if there is more than one\n and sub-sub-compounds for every residue.\n\n Parameters\n ----------\n structure : pmd.Structure\n The structure to load.\n coords_only : bool\n Set preexisting atoms in compound to coordinates given by structure.\n\n \"\"\"\n if coords_only:\n if len(structure.atoms) != self.n_particles:\n raise ValueError(\n 'Number of atoms in {structure} does not match'\n ' {self}'.format(\n **locals()))\n atoms_particles = zip(structure.atoms,\n self.particles(include_ports=False))\n if None in self._particles(include_ports=False):\n raise ValueError('Some particles are None')\n for parmed_atom, particle in atoms_particles:\n particle.pos = np.array([parmed_atom.xx,\n parmed_atom.xy,\n parmed_atom.xz]) / 10\n return\n\n atom_mapping = dict()\n chain_id = None\n chains = defaultdict(list)\n for residue in structure.residues:\n chains[residue.chain].append(residue)\n\n for chain, residues in chains.items():\n if len(chains) > 1:\n chain_compound = Compound()\n self.add(chain_compound, chain_id)\n else:\n chain_compound = self\n for residue in residues:\n for atom in residue.atoms:\n pos = np.array([atom.xx, atom.xy, atom.xz]) / 10\n new_atom = Particle(name=str(atom.name), pos=pos)\n chain_compound.add(\n new_atom, label='{0}[$]'.format(\n atom.name))\n atom_mapping[atom] = new_atom\n\n for bond in structure.bonds:\n atom1 = atom_mapping[bond.atom1]\n atom2 = atom_mapping[bond.atom2]\n self.add_bond((atom1, atom2))\n\n if structure.box is not None:\n # Convert from A to nm\n self.periodicity = 0.1 * structure.box[0:3]\n else:\n self.periodicity = np.array([0., 0., 0.])\n\n def to_parmed(self, box=None, title='', residues=None, show_ports=False,\n infer_residues=False):\n \"\"\"Create a ParmEd Structure from a Compound.\n\n Parameters\n ----------\n box : mb.Box, optional, default=self.boundingbox (with buffer)\n Box information to be used when converting to a `Structure`.\n If 'None', a bounding box is used with 0.25nm buffers at\n each face to avoid overlapping atoms, unless `self.periodicity`\n is not None, in which case those values are used for the\n box lengths.\n title : str, optional, default=self.name\n Title/name of the ParmEd Structure\n residues : str of list of str\n Labels of residues in the Compound. 
Residues are assigned by\n checking against Compound.name.\n show_ports : boolean, optional, default=False\n Include all port atoms when converting to a `Structure`.\n infer_residues : bool, optional, default=False\n Attempt to assign residues based on names of children.\n\n Returns\n -------\n parmed.structure.Structure\n ParmEd Structure object converted from self\n\n See Also\n --------\n parmed.structure.Structure : Details on the ParmEd Structure object\n\n \"\"\"\n structure = pmd.Structure()\n structure.title = title if title else self.name\n atom_mapping = {} # For creating bonds below\n guessed_elements = set()\n\n if not residues and infer_residues:\n residues = list(set([child.name for child in self.children]))\n\n if isinstance(residues, string_types):\n residues = [residues]\n if isinstance(residues, (list, set)):\n residues = tuple(residues)\n\n default_residue = pmd.Residue('RES')\n port_residue = pmd.Residue('PRT')\n compound_residue_map = dict()\n atom_residue_map = dict()\n\n for atom in self.particles(include_ports=show_ports):\n if atom.port_particle:\n current_residue = port_residue\n atom_residue_map[atom] = current_residue\n\n if current_residue not in structure.residues:\n structure.residues.append(current_residue)\n\n pmd_atom = pmd.Atom(atomic_number=0, name='VS',\n mass=0, charge=0)\n pmd_atom.xx, pmd_atom.xy, pmd_atom.xz = atom.pos * 10 # Angstroms\n\n else:\n if residues and atom.name in residues:\n current_residue = pmd.Residue(atom.name)\n atom_residue_map[atom] = current_residue\n compound_residue_map[atom] = current_residue\n elif residues:\n for parent in atom.ancestors():\n if residues and parent.name in residues:\n if parent not in compound_residue_map:\n current_residue = pmd.Residue(parent.name)\n compound_residue_map[parent] = current_residue\n atom_residue_map[atom] = current_residue\n break\n else: # Did not find specified residues in ancestors.\n current_residue = default_residue\n atom_residue_map[atom] = current_residue\n else:\n current_residue = default_residue\n atom_residue_map[atom] = current_residue\n\n if current_residue not in structure.residues:\n structure.residues.append(current_residue)\n\n atomic_number = None\n name = ''.join(char for char in atom.name if not char.isdigit())\n try:\n atomic_number = AtomicNum[atom.name.capitalize()]\n except KeyError:\n element = element_by_name(atom.name.capitalize())\n if name not in guessed_elements:\n warn(\n 'Guessing that \"{}\" is element: \"{}\"'.format(\n atom, element))\n guessed_elements.add(name)\n else:\n element = atom.name.capitalize()\n\n atomic_number = atomic_number or AtomicNum[element]\n mass = Mass[element]\n pmd_atom = pmd.Atom(atomic_number=atomic_number, name=atom.name,\n mass=mass, charge=atom.charge)\n pmd_atom.xx, pmd_atom.xy, pmd_atom.xz = atom.pos * 10 # Angstroms\n\n residue = atom_residue_map[atom]\n structure.add_atom(pmd_atom, resname=residue.name,\n resnum=residue.idx)\n\n atom_mapping[atom] = pmd_atom\n\n structure.residues.claim()\n\n for atom1, atom2 in self.bonds():\n bond = pmd.Bond(atom_mapping[atom1], atom_mapping[atom2])\n structure.bonds.append(bond)\n # pad box with .25nm buffers\n if box is None:\n box = self.boundingbox\n box_vec_max = box.maxs.tolist()\n box_vec_min = box.mins.tolist()\n for dim, val in enumerate(self.periodicity):\n if val:\n box_vec_max[dim] = val\n box_vec_min[dim] = 0.0\n if not val:\n box_vec_max[dim] += 0.25\n box_vec_min[dim] -= 0.25\n box.mins = np.asarray(box_vec_min)\n box.maxs = np.asarray(box_vec_max)\n\n box_vector = 
np.empty(6)\n if box.angles is not None:\n box_vector[3:6] = box.angles\n else:\n box_vector[3] = box_vector[4] = box_vector[5] = 90.0\n for dim in range(3):\n box_vector[dim] = box.lengths[dim] * 10\n structure.box = box_vector\n return structure\n\n def to_networkx(self, names_only=False):\n \"\"\"Create a NetworkX graph representing the hierarchy of a Compound.\n\n Parameters\n ----------\n names_only : bool, optional, default=False Store only the names of the\n compounds in the graph. When set to False, the default behavior,\n the nodes are the compounds themselves.\n\n Returns\n -------\n G : networkx.DiGraph\n \"\"\"\n nx = import_('networkx')\n\n nodes = list()\n edges = list()\n if names_only:\n nodes.append(self.name)\n else:\n nodes.append(self)\n nodes, edges = self._iterate_children(nodes, edges, names_only=names_only)\n\n graph = nx.DiGraph()\n graph.add_nodes_from(nodes)\n graph.add_edges_from(edges)\n return graph\n\n def _iterate_children(self, nodes, edges, names_only=False):\n if not self.children:\n return nodes, edges\n for child in self.children:\n if names_only:\n nodes.append(child.name)\n edges.append([child.parent.name, child.name])\n else:\n nodes.append(child)\n edges.append([child.parent, child])\n nodes, edges = child._iterate_children(nodes, edges, names_only=names_only)\n return nodes, edges\n\n def to_intermol(self, molecule_types=None): # pragma: no cover\n \"\"\"Create an InterMol system from a Compound.\n\n Parameters\n ----------\n molecule_types : list or tuple of subclasses of Compound\n\n Returns\n -------\n intermol_system : intermol.system.System\n\n \"\"\"\n from intermol.atom import Atom as InterMolAtom\n from intermol.molecule import Molecule\n from intermol.system import System\n import simtk.unit as u\n\n if isinstance(molecule_types, list):\n molecule_types = tuple(molecule_types)\n elif molecule_types is None:\n molecule_types = (type(self),)\n intermol_system = System()\n\n last_molecule_compound = None\n for atom_index, atom in enumerate(self.particles()):\n for parent in atom.ancestors():\n # Don't want inheritance via isinstance().\n if type(parent) in molecule_types:\n # Check if we have encountered this molecule type before.\n if parent.name not in intermol_system.molecule_types:\n self._add_intermol_molecule_type(\n intermol_system, parent)\n if parent != last_molecule_compound:\n last_molecule_compound = parent\n last_molecule = Molecule(name=parent.name)\n intermol_system.add_molecule(last_molecule)\n break\n else:\n # Should never happen if molecule_types only contains\n # type(self)\n raise ValueError(\n 'Found an atom {} that is not part of any of '\n 'the specified molecule types {}'.format(\n atom, molecule_types))\n\n # Add the actual intermol atoms.\n intermol_atom = InterMolAtom(atom_index + 1, name=atom.name,\n residue_index=1, residue_name='RES')\n intermol_atom.position = atom.pos * u.nanometers\n last_molecule.add_atom(intermol_atom)\n return intermol_system\n\n @staticmethod\n def _add_intermol_molecule_type(intermol_system, parent): # pragma: no cover\n \"\"\"Create a molecule type for the parent and add bonds. 
\"\"\"\n from intermol.moleculetype import MoleculeType\n from intermol.forces.bond import Bond as InterMolBond\n\n molecule_type = MoleculeType(name=parent.name)\n intermol_system.add_molecule_type(molecule_type)\n\n for index, parent_atom in enumerate(parent.particles()):\n parent_atom.index = index + 1\n\n for atom1, atom2 in parent.bonds():\n intermol_bond = InterMolBond(atom1.index, atom2.index)\n molecule_type.bonds.add(intermol_bond)\n\n def __getitem__(self, selection):\n if isinstance(selection, integer_types):\n return list(self.particles())[selection]\n if isinstance(selection, string_types):\n if selection not in self.labels:\n raise MBuildError('{}[\\'{}\\'] does not exist.'.format(self.name,selection))\n return self.labels.get(selection)\n\n def __repr__(self):\n descr = list('<')\n descr.append(self.name + ' ')\n\n if self.children:\n descr.append('{:d} particles, '.format(self.n_particles))\n if any(self.periodicity):\n descr.append('periodicity: {}, '.format(self.periodicity))\n else:\n descr.append('non-periodic, ')\n else:\n descr.append('pos=({: .4f},{: .4f},{: .4f}), '.format(*self.pos))\n\n descr.append('{:d} bonds, '.format(self.n_bonds))\n\n descr.append('id: {}>'.format(id(self)))\n return ''.join(descr)\n\n def _clone(self, clone_of=None, root_container=None):\n \"\"\"A faster alternative to deepcopying.\n\n Does not resolve circular dependencies. This should be safe provided\n you never try to add the top of a Compound hierarchy to a\n sub-Compound. Clones compound hierarchy only, not the bonds.\n \"\"\"\n if root_container is None:\n root_container = self\n if clone_of is None:\n clone_of = dict()\n\n # If this compound has already been cloned, return that.\n if self in clone_of:\n return clone_of[self]\n\n # Otherwise we make a new clone.\n cls = self.__class__\n newone = cls.__new__(cls)\n\n # Remember that we're cloning the new one of self.\n clone_of[self] = newone\n\n newone.name = deepcopy(self.name)\n newone.periodicity = deepcopy(self.periodicity)\n newone._pos = deepcopy(self._pos)\n newone.port_particle = deepcopy(self.port_particle)\n newone._check_if_contains_rigid_bodies = deepcopy(\n self._check_if_contains_rigid_bodies)\n newone._contains_rigid = deepcopy(self._contains_rigid)\n newone._rigid_id = deepcopy(self._rigid_id)\n newone._charge = deepcopy(self._charge)\n if hasattr(self, 'index'):\n newone.index = deepcopy(self.index)\n\n if self.children is None:\n newone.children = None\n else:\n newone.children = OrderedSet()\n # Parent should be None initially.\n newone.parent = None\n newone.labels = OrderedDict()\n newone.referrers = set()\n newone.bond_graph = None\n\n # Add children to clone.\n if self.children:\n for child in self.children:\n newchild = child._clone(clone_of, root_container)\n newone.children.add(newchild)\n newchild.parent = newone\n\n # Copy labels, except bonds with atoms outside the hierarchy.\n if self.labels:\n for label, compound in self.labels.items():\n if not isinstance(compound, list):\n newone.labels[label] = compound._clone(\n clone_of, root_container)\n compound.referrers.add(clone_of[compound])\n else:\n # compound is a list of compounds, so we create an empty\n # list, and add the clones of the original list elements.\n newone.labels[label] = []\n for subpart in compound:\n newone.labels[label].append(\n subpart._clone(clone_of, root_container))\n # Referrers must have been handled already, or the will\n # be handled\n\n return newone\n\n def _clone_bonds(self, clone_of=None):\n newone = clone_of[self]\n for c1, 
c2 in self.bonds():\n try:\n newone.add_bond((clone_of[c1], clone_of[c2]))\n except KeyError:\n raise MBuildError(\n \"Cloning failed. Compound contains bonds to \"\n \"Particles outside of its containment hierarchy.\")\n\n\nParticle = Compound\n" ]
[ [ "numpy.expand_dims", "numpy.abs", "numpy.isfinite", "numpy.asarray", "numpy.squeeze", "numpy.linalg.norm", "numpy.ndarray", "numpy.mean", "numpy.zeros", "numpy.any", "numpy.random.rand", "numpy.array", "numpy.where", "numpy.empty" ] ]
FrancescoNegri/federated-learning-gmm
[ "4351b5a4c9ff30df2c6d2244029f73ae02492c76" ]
[ "src/sampling.py" ]
[ "import math\nimport numpy as np\n# from torchvision import datasets, transforms\n\ndef sample_iid(dataset, n_clients):\n n_samples = int(len(dataset) / n_clients)\n clients_groups, all_idxs = {}, [i for i in range(len(dataset))]\n \n for i in range(n_clients):\n clients_groups[i] = set(np.random.choice(all_idxs, n_samples, replace=False))\n all_idxs = list(set(all_idxs) - clients_groups[i])\n\n return clients_groups\n\ndef sample_non_iid(dataset, n_clients, shards_per_client = 2):\n\n # shards per client\n shards_per_client = shards_per_client\n n_shards = n_clients * shards_per_client\n n_samples = math.floor(len(dataset) / n_shards)\n\n shards_idxs = [i for i in range(n_shards)]\n clients_groups = {i: np.array([]) for i in range(n_clients)}\n idxs = np.arange(n_shards * n_samples)\n print(idxs)\n\n for client_idx in range(n_clients):\n # Pick randomly the n shards for this client\n client_shards_idxs = np.random.choice(shards_idxs, shards_per_client, replace=False)\n # Remove the selected n shards from the available ones\n shards_idxs = list(set(shards_idxs) - set(client_shards_idxs))\n \n for shard_idx in set(client_shards_idxs):\n clients_groups[client_idx] = np.concatenate(\n (clients_groups[client_idx], idxs[shard_idx * n_samples : (shard_idx + 1) * n_samples]), \n axis=0\n )\n\n clients_groups[client_idx] = clients_groups[client_idx].astype(int)\n\n return clients_groups\n\n# def mnist_noniid_unequal(dataset, num_users):\n# \"\"\"\n# Sample non-I.I.D client data from MNIST dataset s.t clients\n# have unequal amount of data\n# :param dataset:\n# :param num_users:\n# :returns a dict of clients with each clients assigned certain\n# number of training imgs\n# \"\"\"\n# # 60,000 training imgs --> 50 imgs/shard X 1200 shards\n# num_shards, num_imgs = 1200, 50\n# idx_shard = [i for i in range(num_shards)]\n# dict_users = {i: np.array([]) for i in range(num_users)}\n# idxs = np.arange(num_shards*num_imgs)\n# labels = dataset.train_labels.numpy()\n\n# # sort labels\n# idxs_labels = np.vstack((idxs, labels))\n# idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]\n# idxs = idxs_labels[0, :]\n\n# # Minimum and maximum shards assigned per client:\n# min_shard = 1\n# max_shard = 30\n\n# # Divide the shards into random chunks for every client\n# # s.t the sum of these chunks = num_shards\n# random_shard_size = np.random.randint(min_shard, max_shard+1,\n# size=num_users)\n# random_shard_size = np.around(random_shard_size /\n# sum(random_shard_size) * num_shards)\n# random_shard_size = random_shard_size.astype(int)\n\n# # Assign the shards randomly to each client\n# if sum(random_shard_size) > num_shards:\n\n# for i in range(num_users):\n# # First assign each client 1 shard to ensure every client has\n# # atleast one shard of data\n# rand_set = set(np.random.choice(idx_shard, 1, replace=False))\n# idx_shard = list(set(idx_shard) - rand_set)\n# for rand in rand_set:\n# dict_users[i] = np.concatenate(\n# (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]),\n# axis=0)\n\n# random_shard_size = random_shard_size-1\n\n# # Next, randomly assign the remaining shards\n# for i in range(num_users):\n# if len(idx_shard) == 0:\n# continue\n# shard_size = random_shard_size[i]\n# if shard_size > len(idx_shard):\n# shard_size = len(idx_shard)\n# rand_set = set(np.random.choice(idx_shard, shard_size,\n# replace=False))\n# idx_shard = list(set(idx_shard) - rand_set)\n# for rand in rand_set:\n# dict_users[i] = np.concatenate(\n# (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]),\n# axis=0)\n# 
else:\n\n# for i in range(num_users):\n# shard_size = random_shard_size[i]\n# rand_set = set(np.random.choice(idx_shard, shard_size,\n# replace=False))\n# idx_shard = list(set(idx_shard) - rand_set)\n# for rand in rand_set:\n# dict_users[i] = np.concatenate(\n# (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]),\n# axis=0)\n\n# if len(idx_shard) > 0:\n# # Add the leftover shards to the client with minimum images:\n# shard_size = len(idx_shard)\n# # Add the remaining shard to the client with lowest data\n# k = min(dict_users, key=lambda x: len(dict_users.get(x)))\n# rand_set = set(np.random.choice(idx_shard, shard_size,\n# replace=False))\n# idx_shard = list(set(idx_shard) - rand_set)\n# for rand in rand_set:\n# dict_users[k] = np.concatenate(\n# (dict_users[k], idxs[rand*num_imgs:(rand+1)*num_imgs]),\n# axis=0)\n\n# return dict_users" ]
[ [ "numpy.concatenate", "numpy.arange", "numpy.array", "numpy.random.choice" ] ]
yuyasugano/technical
[ "9bb60b961cb993d6c355793499dbc72c245f3ab2" ]
[ "main.py" ]
[ "#!/usr/bin/python\nimport csv\nimport time\nimport json\nimport requests\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport mpl_finance\n\nheaders = {'Content-Type': 'application/json'}\napi_url_base = 'https://public.bitbank.cc'\npair = 'btc_jpy'\nperiod = '1day'\n\ntoday = \"{0:%Y%m%d}\".format(datetime.today())\nyear = \"2018\"\n\ndef api_ohlcv(timestamp):\n api_url = '{0}/{1}/candlestick/{2}/{3}'.format(api_url_base, pair, period, timestamp)\n response = requests.get(api_url, headers=headers)\n\n if response.status_code == 200:\n ohlcv = json.loads(response.content.decode('utf-8'))['data']['candlestick'][0]['ohlcv']\n return ohlcv\n else:\n return None\n\ndef SMA(df):\n df1 = df.copy()\n df1[\"ma25\"] = df1.close.rolling(window=25).mean()\n df1[\"ma75\"] = df1.close.rolling(window=75).mean()\n df1[\"diff\"] = df1.ma25 - df1.ma75\n df1[\"unixtime\"] = [datetime.timestamp(t) for t in df.index]\n\n # line and Moving Average\n xdate = [x.date() for x in df1.index]\n plt.figure(figsize=(15,5))\n plt.plot(xdate, df1.close,label=\"original\")\n plt.plot(xdate, df1.ma75,label=\"75days\")\n plt.plot(xdate, df1.ma25,label=\"25days\")\n plt.xlim(xdate[0], xdate[-1])\n plt.grid()\n\n # Cross points\n for i in range(1, len(df1)):\n if df1.iloc[i-1][\"diff\"] < 0 and df1.iloc[i][\"diff\"] > 0:\n print(\"{}:GOLDEN CROSS\".format(xdate[i]))\n plt.scatter(xdate[i], df1.iloc[i][\"ma25\"], marker=\"o\", s=100, color=\"b\")\n plt.scatter(xdate[i], df1.iloc[i][\"close\"], marker=\"o\", s=50, color=\"b\", alpha=0.5)\n\n if df1.iloc[i-1][\"diff\"] > 0 and df1.iloc[i][\"diff\"] < 0:\n print(\"{}:DEAD CROSS\".format(xdate[i]))\n plt.scatter(xdate[i], df1.iloc[i][\"ma25\"], marker=\"o\", s=100, color=\"r\")\n plt.scatter(xdate[i], df1.iloc[i][\"close\"], marker=\"o\", s=50, color=\"r\", alpha=0.5)\n plt.legend()\n\ndef Bollinger(df, window=25):\n df1 = df.copy()\n df1[\"ma\"] = df1.close.rolling(window=window).mean()\n df1[\"sigma\"] = df1.close.rolling(window=window).std()\n df1[\"ma+2sigma\"] = df1.ma + 2*df1.sigma\n df1[\"ma-2sigma\"] = df1.ma - 2*df1.sigma\n df1[\"diffplus\"] = df1.close - df1[\"ma+2sigma\"] \n df1[\"diffminus\"] = df1[\"ma-2sigma\"] - df1.close\n s_up = df1[df1[\"diffplus\"] > 0][\"close\"]\n s_down = df1[df1[\"diffminus\"] > 0][\"close\"]\n\n xdate = [x.date() for x in df1.index]\n plt.figure(figsize=(15,5))\n plt.grid()\n plt.xlim(xdate[0], xdate[-1])\n plt.scatter(s_up.index, s_up.values,marker=\"x\", s=100, color=\"blue\")\n plt.scatter(s_down.index, s_down.values,marker=\"x\", s=100, color=\"red\")\n plt.plot(xdate, df1.close.values, label=\"original\", color=\"b\", alpha=0.9)\n plt.plot(xdate, df1.ma.values, label=\"{}ma\".format(window))\n plt.fill_between(xdate, df1.ma-df1.sigma, df1.ma+df1.sigma, color=\"red\", alpha=0.7, label=\"$1\\sigma$\")\n plt.fill_between(xdate, df1.ma-2*df1.sigma, df1.ma+2*df1.sigma, color=\"red\", alpha=0.3, label=\"$2\\sigma$\")\n plt.fill_between(xdate, df1.ma-3*df1.sigma, df1.ma+3*df1.sigma, color=\"red\", alpha=0.1, label=\"$3\\sigma$\") \n plt.legend()\n\ndef MACD(df):\n df1 = df.copy()\n df1[\"MACD\"] = df1.close.ewm(span=12, min_periods=1).mean() - df1.close.ewm(span=26, min_periods=1).mean()\n df1[\"signal\"] = df1.MACD.ewm(span=9, min_periods=1).mean()\n df1[\"macd_diff\"] = df1[\"MACD\"] - df1[\"signal\"]\n\n xdate = [x.date() for x in df1.index]\n plt.figure(figsize=(15,10))\n # plot the original\n plt.subplot(211)\n plt.plot(xdate, 
df1.close,label=\"original\")\n plt.xlim(xdate[0], xdate[-1])\n plt.legend()\n plt.grid()\n # plot MACD\n plt.subplot(212)\n plt.title(\"MACD\")\n plt.plot(xdate, df1.MACD, label=\"MACD\")\n plt.plot(xdate, df1.signal, label=\"signal\")\n plt.xlim(xdate[0], xdate[-1])\n plt.legend()\n plt.grid(True)\n\n # Cross points\n for i in range(1, len(df1)):\n if df1.iloc[i-1][\"macd_diff\"] < 0 and df1.iloc[i][\"macd_diff\"] > 0:\n print(\"{}:GOLDEN CROSS\".format(xdate[0]))\n plt.scatter(xdate[i], df1.iloc[i][\"MACD\"], marker=\"o\", s=100, color=\"b\")\n\n if df1.iloc[i-1][\"macd_diff\"] > 0 and df1.iloc[i][\"macd_diff\"] < 0:\n print(\"{}:DEAD CROSS\".format(xdate[0]))\n plt.scatter(xdate[i], df1.iloc[i][\"MACD\"], marker=\"o\", s=100, color=\"r\")\n\ndef plot_RSI(df, window):\n df1 = df.copy()\n diff = df1.close.diff(periods=1).values\n xdate = [x.date() for x in df1.index]\n RSI = []\n for i in range(window+1, len(xdate)):\n neg = 0\n pos = 0\n for value in diff[i-window:i+1]:\n if value > 0:\n pos += value\n if value < 0:\n neg += value\n pos_ave = pos/window\n neg_ave = np.abs(neg/window)\n rsi = pos_ave/(pos_ave+neg_ave)*100\n RSI.append(rsi)\n # draw RSI figure\n plt.plot(xdate[window+1:], RSI, label = \"RSI {}\".format(window), lw=2.5, alpha=0.6)\n plt.xlim(xdate[window+1], xdate[-1])\n plt.ylim(0,100)\n plt.legend()\n\ndef RSI(df):\n df1 = df.copy()\n xdate = [x.date() for x in df1.index]\n plt.figure(figsize=(15,10))\n # plot the original\n plt.subplot(211)\n plt.plot(xdate, df1.close,label=\"original\")\n plt.xlim(xdate[0], xdate[-1])\n plt.legend()\n plt.grid()\n # plot RSI\n plt.subplot(212)\n plt.grid()\n plt.title(\"RSI\")\n plot_RSI(df1, window=9)\n plot_RSI(df1, window=22)\n plot_RSI(df1, window=42)\n plt.fill_between(xdate, np.ones(len(xdate))*30, color=\"blue\", alpha=0.2)\n plt.fill_between(xdate, np.ones(len(xdate))*70, np.ones(len(xdate))*100, color=\"red\", alpha=0.2)\n plt.plot(xdate, np.ones(len(xdate))*30, color=\"blue\", linestyle=\"dotted\")\n plt.plot(xdate, np.ones(len(xdate))*70, color=\"red\", linestyle=\"dotted\")\n plt.show()\n\ndef Ichimoku(df):\n df1 = df.copy()\n max_9 = df1.high.rolling(window=9).max()\n min_9 = df1.high.rolling(window=9).min()\n df1[\"tenkan\"] = (max_9+min_9)/2\n df1[\"base\"] = (df1.high.rolling(window=26).max()+df1.high.rolling(window=26).min())/2\n xdate = [x.date() for x in df1.index]\n plt.figure(figsize=(15,5)) \n plt.grid()\n plt.plot(xdate, df1.close, color=\"b\", lw=1, linestyle=\"dotted\", label=\"original\")\n plt.plot(xdate, df1.tenkan, label=\"Conversion line\")\n plt.plot(xdate, df1.base, label=\"Base line\")\n senkou1 = ((df1.tenkan+df1.base)/2).iloc[:-26]\n senkou2 = ((df1.high.rolling(window=52).max()+df1.high.rolling(window=52).min())/2).iloc[:-26]\n plt.fill_between(xdate[26:], senkou1, senkou2, color=\"blue\", alpha=0.2, label=\"Cloud\")\n plt.legend(loc=4)\n plt.xlim(xdate[0], xdate[-1])\n\ndef main():\n ohlc = api_ohlcv(year)\n open, high, low, close, volume, timestamp = [],[],[],[],[],[]\n\n for i in ohlc:\n open.append(int(i[0]))\n high.append(int(i[1]))\n low.append(int(i[2]))\n close.append(int(i[3]))\n volume.append(float(i[4]))\n time_str = str(i[5])\n timestamp.append(datetime.fromtimestamp(int(time_str[:10])).strftime('%Y/%m/%d %H:%M:%M'))\n\n date_time_index = pd.to_datetime(timestamp) # convert to DateTimeIndex type\n df = pd.DataFrame({'open': open, 'high': high, 'low': low, 'close': close}, index=date_time_index) # volume is not contained\n # adjustment for JST if required\n df.index += 
pd.offsets.Hour(9)\n\n print(df.head(5))\n # SMA(df)\n # Bollinger(df, window=25)\n # MACD(df)\n # RSI(df)\n # Ichimoku(df)\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.to_datetime", "pandas.offsets.Hour", "numpy.abs", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.grid", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
pietrobarbiero/pytorch_explainer
[ "beb7d6f40acab1d0bf79bf8dfb0425c7dd4ea174" ]
[ "tests/test_logic_layer.py" ]
[ "import unittest\n\nimport torch\nfrom pytorch_lightning import seed_everything\nfrom torch.nn.functional import one_hot\n\nimport torch_explain as te\nfrom torch_explain.logic.metrics import test_explanation, complexity, concept_consistency, formula_consistency\nfrom torch_explain.logic.nn import entropy, psi\nfrom torch_explain.nn.functional import prune_equal_fanin\n\n\nclass TestTemplateObject(unittest.TestCase):\n def test_psi_explain_class_binary(self):\n for i in range(1):\n seed_everything(i)\n\n # Problem 1\n x = torch.tensor([\n [0, 0],\n [0, 1],\n [1, 0],\n [1, 1],\n ], dtype=torch.float)\n y = torch.tensor([0, 1, 1, 0], dtype=torch.float).unsqueeze(1)\n\n layers = [\n torch.nn.Linear(x.shape[1], 10),\n torch.nn.Sigmoid(),\n torch.nn.Linear(10, 5),\n torch.nn.Sigmoid(),\n torch.nn.Linear(5, 1),\n torch.nn.Sigmoid(),\n ]\n model = torch.nn.Sequential(*layers)\n\n optimizer = torch.optim.AdamW(model.parameters(), lr=0.01)\n loss_form = torch.nn.BCELoss()\n model.train()\n for epoch in range(6001):\n optimizer.zero_grad()\n y_pred = model(x)\n loss = loss_form(y_pred, y) + 0.000001 * te.nn.functional.l1_loss(model)\n loss.backward()\n optimizer.step()\n\n model = prune_equal_fanin(model, epoch, prune_epoch=1000, k=2)\n\n # compute accuracy\n if epoch % 100 == 0:\n accuracy = y.eq(y_pred>0.5).sum().item() / y.size(0)\n print(f'Epoch {epoch}: loss {loss:.4f} train accuracy: {accuracy:.4f}')\n\n y1h = one_hot(y.squeeze().long())\n\n explanation = psi.explain_class(model, x)\n explanation_complexity = complexity(explanation)\n cc = concept_consistency([explanation])\n fc = formula_consistency([explanation])\n print(explanation)\n print(explanation_complexity)\n print(cc)\n print(fc)\n assert explanation == '(feature0000000000 & ~feature0000000001) | (feature0000000001 & ~feature0000000000)'\n accuracy, preds = test_explanation(explanation, x, y1h, target_class=1)\n print(f'Accuracy: {100*accuracy:.2f}%')\n assert accuracy == 1\n\n return\n\n def test_entropy_multi_target(self):\n\n # eye, nose, window, wheel, hand, radio\n x = torch.tensor([\n [0, 0, 0, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n [0, 0, 1, 1],\n ], dtype=torch.float)\n # human, car\n y = torch.tensor([ # 1, 0, 0, 1], dtype=torch.long)\n [0, 1, 0, 1],\n [1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 1, 0],\n [0, 1, 1, 0],\n [0, 1, 1, 0],\n ], dtype=torch.float)\n y1h = y # one_hot(y)\n\n layers = [\n te.nn.EntropyLinear(x.shape[1], 20, n_classes=y1h.shape[1], temperature=0.3),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(20, 10),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(10, 1),\n ]\n model = torch.nn.Sequential(*layers)\n\n optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)\n loss_form = torch.nn.BCEWithLogitsLoss()\n model.train()\n\n concept_names = ['x1', 'x2', 'x3', 'x4']\n target_class_names = ['y', '¬y', 'z', '¬z']\n\n for epoch in range(7001):\n # train step\n optimizer.zero_grad()\n y_pred = model(x).squeeze(-1)\n loss = loss_form(y_pred, y) + 0.0001 * te.nn.functional.entropy_logic_loss(model)\n loss.backward()\n optimizer.step()\n\n # compute accuracy\n if epoch % 100 == 0:\n accuracy = (y_pred>0.5).eq(y).sum().item() / (y.size(0) * y.size(1))\n print(f'Epoch {epoch}: loss {loss:.4f} train accuracy: {accuracy:.4f}')\n\n # extract logic formulas\n for target_class in range(y.shape[1]):\n explanation_class_i, exp_raw = entropy.explain_class(model, x, y1h, x, y1h, target_class,\n concept_names=concept_names)\n accuracy_i, 
preds = test_explanation(exp_raw, x, y1h, target_class)\n if explanation_class_i: explanation_class_i = explanation_class_i.replace('&', '∧').replace('|', '∨').replace('~', '¬')\n explanation_class_i = f'∀x: {explanation_class_i} ↔ {target_class_names[target_class]}'\n\n print(f'\\tExplanation class {target_class} (acc. {accuracy_i*100:.2f}): {explanation_class_i}')\n print()\n\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.BCELoss", "torch.tensor", "torch.nn.Linear", "torch.nn.BCEWithLogitsLoss", "torch.nn.Sigmoid", "torch.nn.LeakyReLU" ] ]
suvadeep-iitb/finetune_alexnet_with_tensorflow
[ "16d88f2a870ef4c0a1febc1807419f68f0481752" ]
[ "multiclass2multilabel.py" ]
[ "import pickle\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nimport sys, os\n\n\ndef get_label2id(label_map_file):\n label_map = open(label_map_file).readlines()\n label_map = [l.split() for l in label_map]\n\n num_labels = len(label_map)\n label2id = {label_map[i][0]: i for i in range(num_labels)}\n\n parents = []\n for lbl in label_map:\n parents += lbl[1:]\n parents = sorted(list(set(parents)))\n\n for parent in parents:\n if parent not in label2id:\n label2id[parent] = num_labels\n num_labels += 1\n\n return label2id\n\n\ndef multiclass2multilabel(source_file, save_file, label2id):\n images, labels, ids = pickle.load(open(source_file, 'rb'))\n num_images = 0\n for image_mat in images:\n num_images += image_mat.shape[0]\n assert num_images == len(labels)\n assert num_images == len(ids)\n\n label_map = open(label_map_file).readlines()\n label_map = [l.split() for l in label_map]\n\n num_labels = len(label2id)\n assert num_labels == len(set(label2id.values()))\n\n print('# of images: '+str(num_images))\n print('# of labels: '+str(num_labels))\n\n row_ind = []\n col_ind = []\n data = []\n for i, label in enumerate(labels):\n assert label_map[label][0] == ids[i].split('_')[0]\n for l in label_map[label]:\n row_ind.append(i)\n col_ind.append(label2id[l])\n data.append(np.float32(1.0))\n label_mat = csr_matrix((data,(row_ind, col_ind)), shape=(num_images, num_labels), dtype=np.float32)\n\n pickle.dump((images, label_mat, (ids, label2id)), open(save_file, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == '__main__':\n source_train_file = sys.argv[1]\n source_val_file = sys.argv[2]\n source_test_file = sys.argv[3]\n save_train_file = sys.argv[4]\n save_val_file = sys.argv[5]\n save_test_file = sys.argv[6]\n label_map_file = sys.argv[7]\n\n label2id = get_label2id(label_map_file)\n multiclass2multilabel(source_train_file, save_train_file, label2id)\n multiclass2multilabel(source_val_file, save_val_file, label2id)\n multiclass2multilabel(source_test_file, save_test_file, label2id)\n\n" ]
[ [ "numpy.float32", "scipy.sparse.csr_matrix" ] ]
clowdcap/mysite
[ "2fd6a2f69cfc58ef012138340ae86d8896bff647" ]
[ "News/folium.py" ]
[ "import folium\nfrom geopy.geocoders import Nominatim\nimport pandas as pd\n\nmapa = folium.Map(\n zoom_start=14,\n location=[-25.371781,-49.4141514],\n control_scale=True)\n\nler = pd.read_excel('./tables/locais.xlsx')\nler.drop_duplicates(inplace=True)\n\nlocator = Nominatim(user_agent=\"myGeocoder\")\nlocation = locator.geocode(ler['ENDERECO'])\n\nfor index, linha in ler.iterrows():\n float(linha['LAT'])\n float(linha['LON'])\n folium.Marker([linha['LAT'], linha[\"LON\"]],\n popup=f\"\"\"Zona: {linha[\"ZONA\"]}\\n\\n\\n{linha[\"RUA\"]} \"\"\").add_to(mapa)\n\nmapa.save(\"../templates/folium.html\")\n" ]
[ [ "pandas.read_excel" ] ]
morzh/stylegan2-encoder-pytorch
[ "ae3635030a0d4d88e2bd2aba301ea4b4c2dd7894" ]
[ "src/perceptual_sim_training/models/dist_model.py" ]
[ "\nfrom __future__ import absolute_import\n\nimport sys\nsys.path.append('..')\nsys.path.append('.')\nimport numpy as np\nimport torch\nfrom torch import nn\nimport os\nfrom collections import OrderedDict\nfrom torch.autograd import Variable\nimport itertools\nfrom .base_model import BaseModel\nfrom scipy.ndimage import zoom\nimport fractions\nimport functools\nimport skimage.transform\nfrom IPython import embed\n\nfrom . import networks_basic as networks\n# from PerceptualSimilarity.util import util\nfrom util import util\n\nclass DistModel(BaseModel):\n def name(self):\n return self.model_name\n\n def initialize(self, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Gray', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, batch_size=128, train_loss='ranked', lr=.0001, beta1=0.5, version='0.1', args=None):\n '''\n INPUTS\n model - ['net-lin'] for linearly calibrated network\n ['net'] for off-the-shelf network\n ['fourier-lin'] for fourier based transforms\n ['L2'] for L2 distance\n ['SSIM'] for ssim\n net - ['squeeze','alex','vgg', 'dct', 'fft']\n model_path - if None, will look in weights/[NET_NAME].pth\n colorspace - ['Lab','RGB','Gray'] colorspace for all loss functions\n use_gpu - bool - whether or not to use a GPU\n printNet - bool - whether or not to print network architecture out\n spatial - bool - whether to output an array containing varying distances across spatial dimensions\n spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).\n spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. 
if None then resized to size of input images.\n spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).\n is_train - bool - [True] for training mode\n train_loss - 'ranked' for BCERanked loss (original paper)\n 'weightedsigmoid' for weighted sigmoid loss\n lr - float - initial learning rate\n beta1 - float - initial momentum term for adam\n version - 0.1 for latest, 0.0 was original\n '''\n BaseModel.initialize(self, use_gpu=use_gpu)\n\n self.model = model\n self.net = net\n self.use_gpu = use_gpu\n self.is_train = is_train\n self.spatial = spatial\n self.spatial_shape = spatial_shape\n self.spatial_order = spatial_order\n self.spatial_factor = spatial_factor\n self.batch_size = batch_size\n self.train_loss_option = train_loss\n self.weight_clamp = True # cliclamp negative weights during training\n\n self.model_name = '%s [%s]'%(model,net)\n if(self.model == 'net-lin' and self.net in ['squeeze','alex','vgg']): # pretrained net + linear layer\n self.net = networks.PNetLin(use_gpu=use_gpu,pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net, colorspace=colorspace,use_dropout=True,spatial=spatial,version=version)\n kw = {}\n if not use_gpu:\n kw['map_location'] = 'cpu'\n if(model_path is None):\n import inspect\n # model_path = './PerceptualSimilarity/weights/v%s/%s.pth'%(version,net)\n model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', '..', 'weights/v%s/%s.pth'%(version,net)))\n\n if(not is_train):\n print('Loading model from: %s'%model_path)\n self.net.load_state_dict(torch.load(model_path, **kw))\n\n elif(self.model in ['watson', 'Watson']):\n self.net = networks.Watson(transform=self.net, use_gpu=use_gpu, is_train=is_train, colorspace=colorspace)\n self.model_name = self.net.model_name\n self.weight_clamp = False\n\n elif(self.model=='net'): # pretrained network\n assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks'\n self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net,colorspace=colorspace)\n self.is_fake_net = True\n elif(self.model in ['L2','l2']):\n self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing\n self.model_name = 'L2'\n elif(self.model in ['L1','l1']):\n self.net = networks.L1(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing\n self.model_name = 'L1'\n elif(self.model in ['edge','Edge']):\n self.net = networks.Edge(use_gpu=use_gpu,colorspace=colorspace, args=args)\n self.model_name = 'Edge'\n elif(self.model in ['DSSIM','dssim','SSIM','ssim']):\n self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)\n self.model_name = 'SSIM'\n self.batch_size = 1\n elif(self.model in ['Adaptive', 'adaptive']):\n self.net = networks.RobustLoss(use_gpu=use_gpu,colorspace=colorspace)\n self.model_name = 'Adaptive Robust Loss Function'\n else:\n raise ValueError(\"Model [%s] not recognized.\" % self.model)\n\n self.parameters = list(self.net.parameters())\n\n if self.is_train: # training mode\n # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)\n if self.train_loss_option == 'ranked':\n self.trainLoss = networks.BCERankingLoss(use_gpu=use_gpu)\n elif self.train_loss_option == 'weightedsigmoid':\n self.trainLoss = networks.WeightedSigmoidLoss(use_gpu=use_gpu)\n else:\n raise Exception('No other train loss implemented')\n self.parameters+=self.trainLoss.parameters\n self.lr = lr\n self.old_lr = lr\n self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, 
betas=(beta1, 0.999), weight_decay=0.)\n else: # test mode\n self.net.eval()\n\n if(printNet):\n print('---------- Networks initialized -------------')\n networks.print_network(self.net)\n print('-----------------------------------------------')\n\n def forward_pair(self,in1,in2,retPerLayer=False):\n if(retPerLayer):\n return self.net.forward(in1,in2, retPerLayer=True)\n else:\n return self.net.forward(in1,in2)\n\n def forward(self, in0, in1, retNumpy=True):\n ''' Function computes the distance between image patches in0 and in1\n INPUTS\n in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]\n retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array\n OUTPUT\n computed distances between in0 and in1\n '''\n\n self.input_ref = in0\n self.input_p0 = in1\n\n if(self.use_gpu):\n self.input_ref = self.input_ref.cuda()\n self.input_p0 = self.input_p0.cuda()\n\n self.var_ref = Variable(self.input_ref,requires_grad=True)\n self.var_p0 = Variable(self.input_p0,requires_grad=True)\n\n self.d0 = self.forward_pair(self.var_ref, self.var_p0)\n self.loss_total = self.d0\n\n def convert_output(d0):\n if(retNumpy):\n ans = d0.cpu().data.numpy()\n if not self.spatial:\n ans = ans.flatten()\n else:\n assert(ans.shape[0] == 1 and len(ans.shape) == 4)\n return ans[0,...].transpose([1, 2, 0]) # Reshape to usual numpy image format: (height, width, channels)\n return ans\n else:\n return d0\n\n if self.spatial:\n L = [convert_output(x) for x in self.d0]\n spatial_shape = self.spatial_shape\n if spatial_shape is None:\n if(self.spatial_factor is None):\n spatial_shape = (in0.size()[2],in0.size()[3])\n else:\n spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor)\n \n L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L]\n \n L = np.mean(np.concatenate(L, 2) * len(L), 2)\n return L\n else:\n return convert_output(self.d0)\n\n # ***** TRAINING FUNCTIONS *****\n def optimize_parameters(self):\n self.forward_train()\n self.optimizer_net.zero_grad()\n self.backward_train()\n self.optimizer_net.step()\n if self.weight_clamp:\n self.clamp_weights()\n\n def clamp_weights(self):\n for module in self.net.modules():\n if(hasattr(module, 'weight') and module.kernel_size==(1,1)):\n module.weight.data = torch.clamp(module.weight.data,min=0)\n\n def set_input(self, data):\n self.input_ref = data['ref']\n self.input_p0 = data['p0']\n self.input_p1 = data['p1']\n self.input_judge = data['judge']\n\n if(self.use_gpu):\n self.input_ref = self.input_ref.cuda()\n self.input_p0 = self.input_p0.cuda()\n self.input_p1 = self.input_p1.cuda()\n self.input_judge = self.input_judge.cuda()\n\n self.var_ref = Variable(self.input_ref,requires_grad=True)\n self.var_p0 = Variable(self.input_p0,requires_grad=True)\n self.var_p1 = Variable(self.input_p1,requires_grad=True)\n\n def forward_train(self): # run forward pass\n self.d0 = self.forward_pair(self.var_ref, self.var_p0)\n self.d1 = self.forward_pair(self.var_ref, self.var_p1)\n self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)\n\n # var_judge\n self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())\n\n self.loss_total = self.trainLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)\n return self.loss_total\n\n def backward_train(self):\n torch.mean(self.loss_total).backward()\n\n def compute_accuracy(self,d0,d1,judge):\n ''' d0, d1 are Variables, judge is a Tensor '''\n d1_lt_d0 = 
(d1<d0).cpu().data.numpy().flatten()\n judge_per = judge.cpu().numpy().flatten()\n return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)\n\n def get_current_errors(self):\n retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),\n ('acc_r', self.acc_r)])\n\n for key in retDict.keys():\n retDict[key] = np.mean(retDict[key])\n\n return retDict\n\n def get_current_visuals(self):\n zoom_factor = 256/self.var_ref.data.size()[2]\n\n ref_img = util.tensor2im(self.var_ref.data)\n p0_img = util.tensor2im(self.var_p0.data)\n p1_img = util.tensor2im(self.var_p1.data)\n\n ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)\n p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)\n p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)\n\n return OrderedDict([('ref', ref_img_vis),\n ('p0', p0_img_vis),\n ('p1', p1_img_vis)])\n\n def save(self, path, label):\n # save network\n if self.model in ['fourier-lin', 'fourier', 'deep-lin', 'deep']:\n save_filename = '%s_net_.pth' % (label)\n self.net.net.save_weights(os.path.join(path, save_filename))\n else:\n self.save_network(self.net, path, '', label)\n\n # save loss network\n if self.train_loss_option == 'ranked':\n self.save_network(self.trainLoss.net, path, 'rank', label)\n\n def update_learning_rate(self,nepoch_decay):\n lrd = self.lr / nepoch_decay\n lr = self.old_lr - lrd\n\n for param_group in self.optimizer_net.param_groups:\n param_group['lr'] = lr\n\n print('update lr [%s] decay: %f -> %f' % (type,self.old_lr, lr))\n self.old_lr = lr\n\n\n\ndef score_2afc_dataset(data_loader,func):\n ''' Function computes Two Alternative Forced Choice (2AFC) score using\n distance function 'func' in dataset 'data_loader'\n INPUTS\n data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside\n func - callable distance function - calling d=func(in0,in1) should take 2\n pytorch tensors with shape Nx3xXxY, and return numpy array of length N\n OUTPUTS\n [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators\n [1] - dictionary with following elements\n d0s,d1s - N arrays containing distances between reference patch to perturbed patches \n gts - N array in [0,1], preferred patch selected by human evaluators\n (closer to \"0\" for left patch p0, \"1\" for right patch p1,\n \"0.6\" means 60pct people preferred right patch, 40pct preferred left)\n scores - N array in [0,1], corresponding to what percentage function agreed with humans\n CONSTS\n N - number of test triplets in data_loader\n '''\n\n d0s = []\n d1s = []\n gts = []\n\n # bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())\n for (i,data) in enumerate(data_loader.load_data()):\n if len(data['ref']) > 1:\n d0s+=func(data['p0'], data['ref']).tolist()\n d1s+=func(data['p1'], data['ref']).tolist()\n gts+=data['judge'].cpu().numpy().flatten().tolist()\n else:\n d0s.append(func(data['p0'], data['ref']).item())\n d1s.append(func(data['p1'], data['ref']).item())\n gts.append(data['judge'].cpu().numpy().flatten()[0])\n # bar.update(i)\n\n d0s = np.array(d0s)\n d1s = np.array(d1s)\n gts = np.array(gts)\n scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5\n\n return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))\n\ndef score_jnd_dataset(data_loader,func):\n ''' Function computes JND score using distance function 'func' in dataset 'data_loader'\n INPUTS\n data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside\n func - callable distance function - calling d=func(in0,in1) 
should take 2\n pytorch tensors with shape Nx3xXxY, and return numpy array of length N\n OUTPUTS\n [0] - JND score in [0,1], mAP score (area under precision-recall curve)\n [1] - dictionary with following elements\n ds - N array containing distances between two patches shown to human evaluator\n sames - N array containing fraction of people who thought the two patches were identical\n CONSTS\n N - number of test triplets in data_loader\n '''\n\n ds = []\n gts = []\n\n # bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())\n for (i,data) in enumerate(data_loader.load_data()):\n ds+=func(data['p0'],data['p1']).tolist()\n gts+=data['same'].cpu().numpy().flatten().tolist()\n # bar.update(i)\n\n sames = np.array(gts)\n ds = np.array(ds)\n\n sorted_inds = np.argsort(ds)\n ds_sorted = ds[sorted_inds]\n sames_sorted = sames[sorted_inds]\n\n TPs = np.cumsum(sames_sorted)\n FPs = np.cumsum(1-sames_sorted)\n FNs = np.sum(sames_sorted)-TPs\n\n precs = TPs/(TPs+FPs)\n recs = TPs/(TPs+FNs)\n score = util.voc_ap(recs,precs)\n\n return(score, dict(ds=ds,sames=sames))\n" ]
[ [ "torch.optim.Adam", "torch.mean", "torch.load", "scipy.ndimage.zoom", "numpy.cumsum", "numpy.concatenate", "numpy.mean", "numpy.argsort", "torch.clamp", "numpy.array", "numpy.sum", "torch.autograd.Variable" ] ]
MrDavidYu/img_utils
[ "707f07390b539c822dafd625febc2aa3887832c8" ]
[ "cv_utils.py" ]
[ "import cv2\r\nimport math\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport pytesseract\r\n\r\n\"\"\"\r\nList of helper, enhancement and transformation funcs commonly used for OCR and\r\nCV processing given image or bounding box information. N.B. bbox format in\r\nTensorflow Object Detection format, i.e. [y_min, x_min, y_max, x_max]\r\n\r\nList of functions:\r\n\r\napply_OCR(): Applies Tesseract OCR library to extract a string from image\r\nauto_rotate_img(): Applies Canny transform and open-cv Hough Line transform\r\n to auto rotate image.\r\ncrop_bbox(): Crop a bounding box region from an image.\r\ndraw_bbox(): Draws a single bounding box on image.\r\ndraw_bboxes(): Draws multiple bounding boxes on image.\r\nexpand_bbox(): Expand the bounding box passed in by a prespecified factor.\r\n\"\"\"\r\n\r\n\r\ndef apply_OCR(img, img_num):\r\n \"\"\"\r\n Applies Tesseract OCR library to extract a string from image.\r\n Modified code from https://github.com/stefbo/ulr-ocr\r\n Input: Image\r\n Output: String\r\n \"\"\"\r\n ret_string = \"test\"\r\n SCREEN_WIDTH = 128\r\n SCREEN_HEIGHT = 64\r\n\r\n if img is None:\r\n print(\"Reading image failed, img is None\")\r\n exit(1)\r\n\r\n # Extract the center part from the image. Quality of the OCR\r\n # for the header line and result unit is poor. The parse the\r\n # unit using template matching below.\r\n\r\n # Re-scale the image before passing it to tesseract and do\r\n # automatic thresholding using Otsu's algorithm.\r\n scale_factor = 4\r\n scaled_img = cv2.resize(img[0:50, 0:SCREEN_WIDTH], (0, 0), fx=scale_factor,\r\n fy=scale_factor, interpolation=cv2.INTER_CUBIC)\r\n # DEBUG: originally cv2.resize(img[10:50, ...])\r\n assert(scaled_img is not None)\r\n\r\n thres, thres_img = cv2.threshold(scaled_img, 0, 255,\r\n cv2.THRESH_BINARY | cv2.THRESH_OTSU)\r\n assert(thres_img is not None)\r\n\r\n text = pytesseract.image_to_string(\r\n thres_img, config='--user-words words.txt config.txt')\r\n ret_string = text\r\n\r\n # TODO: TEMPLATE MATCHING EXTENSION. CHECK ORIGINAL REPO 4 DETAILS\r\n return ret_string\r\n\r\n\r\ndef auto_rotate_img(img_before):\r\n \"\"\" Applies Canny transform and open-cv Hough Line transform to auto rotate\r\n image.\r\n \"\"\"\r\n img_gray = cv2.cvtColor(img_before, cv2.COLOR_BGR2GRAY)\r\n img_edges = cv2.Canny(img_gray, 50, 150, apertureSize=3)\r\n\r\n # Probabalistic Hough Lines Transform\r\n # Input: binary img, rho accuracy, theta accuracy, threshold(min no of\r\n # votes to be considered a line, so if 2 points in a line, then 2),\r\n # minLineLength, maxLineGap\r\n # Returns: Two endpoints of each line\r\n lines = cv2.HoughLinesP(img_edges, 0.01, np.pi/180, 10, minLineLength=10,\r\n maxLineGap=1)\r\n if lines is None:\r\n print(\"\\u001b[31mN.B.: HoughLines detected no lines\\u001b[30m\")\r\n return img_gray\r\n\r\n angles = []\r\n\r\n for i in range(len(lines)):\r\n for x1, y1, x2, y2 in lines[i]:\r\n cv2.line(img_before, (x1, y1), (x2, y2), (255, 0, 0), 1)\r\n angle = math.degrees(math.atan2(y2 - y1, x2 - x1))\r\n angles.append(angle)\r\n\r\n median_angle = np.median(angles)\r\n print(\"Suggested angle is: \", str(median_angle))\r\n if abs(median_angle) > 20:\r\n print(\"Suggested angle too large. 
Disregarding.\")\r\n img_rotated = img_gray\r\n else:\r\n print(\"Rotating by suggested angle.\")\r\n img_rotated = ndimage.rotate(img_gray, median_angle)\r\n\r\n return img_rotated\r\n\r\n\r\ndef crop_bbox(image, y_min, x_min, y_max, x_max):\r\n \"\"\"\r\n Crop a bounding box region from an image.\r\n Input: Image from which to extract a bounding box, bounding box coordinates\r\n Output: Cropped image using the bounding box\r\n \"\"\"\r\n img = np.asarray(image) # N.B. np.asarray converts (WxHxC) to (HxWxC)\r\n img_width = img.shape[1]\r\n img_height = img.shape[0]\r\n\r\n # Convert bbox coord from ratio to integer relative to the size of the img\r\n x_min_quant = int(np.floor(x_min*img_width))\r\n x_max_quant = int(np.ceil(x_max*img_width))\r\n y_min_quant = int(np.floor(y_min*img_height))\r\n y_max_quant = int(np.ceil(y_max*img_height))\r\n img_ret = Image.fromarray(img[y_min_quant: y_max_quant,\r\n x_min_quant: x_max_quant, :], 'RGB')\r\n return img_ret\r\n\r\n\r\ndef draw_bbox(original_image, y_min, x_min, y_max, x_max, thickness,\r\n color=(255, 0, 0)):\r\n \"\"\" Draws a single bounding box on image.\r\n Input: Original image, bbox coordinates, thickness of line and color.\r\n Output: Image with drawn bbox\r\n Update note: mod draw_bboxes() accordingly\r\n \"\"\"\r\n\r\n # N.B. np.asarray converts (WxHxC) to (HxWxC)\r\n original_image_np = np.asarray(original_image)\r\n img_width = original_image_np.shape[1]\r\n img_height = original_image_np.shape[0]\r\n return_image = np.copy(original_image_np)\r\n x_min_px = int(x_min*img_width)\r\n y_min_px = int(y_min*img_height)\r\n x_max_px = int(x_max*img_width)\r\n y_max_px = int(y_max*img_height)\r\n cv2.rectangle(return_image, (x_min_px, y_min_px), (x_max_px, y_max_px),\r\n color, thickness)\r\n return return_image\r\n\r\n\r\ndef draw_bboxes(original_image, bboxes, num, thickness, color=(255, 0, 0)):\r\n \"\"\" Draws multiple bounding boxes on image\r\n Input: Original image, array of bboxes each with the format (y_min, x_min,\r\n y_max, y_max) number of bboxes to draw, thickness of line and color\r\n Output: Image with drawn bboxes\r\n Update note: mod draw_bbox() accordingly\r\n \"\"\"\r\n\r\n # N.B. np.asarray converts (WxHxC) to (HxWxC)\r\n original_image_np = np.asarray(original_image)\r\n img_width = original_image_np.shape[1]\r\n img_height = original_image_np.shape[0]\r\n return_image = np.copy(original_image_np)\r\n if num > len(bboxes):\r\n print(\"Error: num argument > length of bboxes\")\r\n return None\r\n for i in range(num):\r\n bbox = bboxes[i]\r\n x_min = bbox[1]\r\n y_min = bbox[0]\r\n x_max = bbox[3]\r\n y_max = bbox[2]\r\n x_min_px = int(x_min*img_width)\r\n y_min_px = int(y_min*img_height)\r\n x_max_px = int(x_max*img_width)\r\n y_max_px = int(y_max*img_height)\r\n cv2.rectangle(return_image, (x_min_px, y_min_px), (x_max_px, y_max_px),\r\n color, thickness)\r\n return return_image\r\n\r\n\r\ndef expand_bbox(image, y_min, x_min, y_max, x_max, factor):\r\n \"\"\" Expand the bounding box passed in by a prespecified factor.\r\n Output: Tuple of (new_x_min, new_y_min, new_x_max, new_y_max)\r\n \"\"\"\r\n\r\n # N.B. 
np.asarray converts (WxHxC) to (HxWxC)\r\n np_image = np.asarray(image)\r\n img_width = np_image.shape[1]\r\n img_height = np_image.shape[0]\r\n x_center = (x_max + x_min) / 2.0\r\n y_center = (y_max + y_min) / 2.0\r\n x_length = (x_max - x_min)\r\n y_length = (y_max - y_min)\r\n new_x_length = x_length * factor\r\n new_y_length = y_length * factor\r\n new_x_min = x_center - (new_x_length/2.0)\r\n new_y_min = y_center - (new_y_length/2.0)\r\n new_x_max = x_center + (new_x_length/2.0)\r\n new_y_max = y_center + (new_y_length/2.0)\r\n if factor <= 0:\r\n print(\"Error: factor must be positive\")\r\n return None\r\n if (new_x_min < 0 or new_y_min < 0 or new_x_max > img_width\r\n or new_y_max > img_height):\r\n print(\"Error: cannot expand since new bbox is out of bounds\")\r\n # TODO: Need to handle the fact that the len of bbox is now wrong\r\n return [max(0, new_y_min), max(0, new_x_min),\r\n min(img_height, new_y_max), min(img_width, new_x_max)]\r\n return [new_y_min, new_x_min, new_y_max, new_x_max]\r\n" ]
[ [ "numpy.asarray", "numpy.median", "numpy.ceil", "numpy.copy", "numpy.floor" ] ]
lucastorterotot/DL_for_HTT_mass
[ "2aff7741b5f497114dd826f9b167f66f2cdaa329" ]
[ "python/DL_for_HTT/post_training/utils.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\nimport os\nimport pandas as pd\n\nimport DL_for_HTT.common.NN_settings as NN_default_settings\n\nimport DL_for_HTT.post_training.macros as macros\n\ndef load_model_from_json(input_json):\n model_name = input_json.split('/')[-1].replace('.json', '')\n if model_name[:3] == 'XGB':\n model_type = model_name.split(\"-\")[0]\n import xgboost as xgb\n loaded_model = xgb.XGBRegressor()\n loaded_model.load_model(input_json)\n \n # ... -inclusive-max_depth-5-eta-0.1-n_estimators-500-es-5-gamma-0-min_child_weight-1-loss-rmse\n # Get infos on the trained XGB\n infos = model_name\n infos = infos.replace('.json', '')\n\n objective = infos.split(\"-\")[-1]\n eval_ = infos.split(\"-\")[-3]\n min_child_weight = infos.split(\"-\")[-5]\n gamma = infos.split(\"-\")[-7]\n early_stopping_rounds = infos.split(\"-\")[-9]\n n_estimators = infos.split(\"-\")[-11]\n eta = infos.split(\"-\")[-13]\n max_depth = infos.split(\"-\")[-15]\n channel = infos.split(\"-\")[-17]\n \n print(\"Properties:\")\n \n print(\n \"\\t{} channel, eta = {}, max_depth = {}, n_estimators = {}, early_stopping_rounds = {},\".format(\n channel,\n eta,\n max_depth,\n n_estimators,\n early_stopping_rounds,\n )\n )\n print(\n \"\\t objective = {}, eval = {}, gamma = {}, min_child_weight = {}\".format(\n objective,\n eval_,\n gamma,\n min_child_weight,\n )\n )\n else:\n model_type = 'DNN'\n from keras.models import model_from_json\n \n # load json and create model\n NN_weights_path_and_file = input_json.split('/')\n NN_weights_path_and_file[-1] = \"NN_weights-{}\".format(NN_weights_path_and_file[-1].replace('.json', '.h5'))\n NN_weights_file = \"/\".join(NN_weights_path_and_file)\n \n json_file = open(input_json, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n loaded_model.load_weights(NN_weights_file)\n print(\"Loaded model from disk:\")\n print(\"\\t{}\".format(input_json))\n \n # Get infos on the trained NN\n infos = NN_weights_path_and_file[-1]\n infos = infos.replace('.h5', '')\n infos = infos.replace('NN_weights-', '')\n \n is_bottleneck = (\"-bottleneck\" == infos[-11:])\n \n bottleneck = \"\"\n if is_bottleneck:\n infos = infos.replace('-bottleneck', '')\n bottleneck = \"-bottleneck\"\n \n Nneurons = infos.split(\"-\")[-2]\n Nlayers = infos.split(\"-\")[-4]\n channel = infos.split(\"-\")[-5]\n \n w_init_mode = infos.split(\"-\")[-6]\n optimizer = infos.split(\"-\")[-7]\n loss = infos.split(\"-\")[-8]\n \n print(\"Properties:\")\n \n print(\n \"\\t{} channel, {} hidden layers of {} neurons with{} bottleneck\".format(\n channel,\n Nlayers,\n Nneurons,\n \"\" if is_bottleneck else \"out\",\n )\n )\n print(\n \"\\ttrained with {} optimizer, w_init {} and {} loss.\".format(\n optimizer,\n w_init_mode,\n loss,\n )\n )\n return loaded_model, model_type, model_name\n\ndef load_h5_file_and_predict(input_h5, loaded_model, model_type, model_name, only=None, inputs = NN_default_settings.inputs, target = NN_default_settings.target):\n df = pd.read_hdf(input_h5)\n\n if only != None:\n df = df.loc[df['is_{}'.format(only)] == 1]\n \n if \"N_neutrinos_reco\" in inputs:\n df[\"N_neutrinos_reco\"] = 2*np.ones(len(df[\"channel_reco\"]), dtype='int')\n df.loc[(df[\"channel_reco\"] == \"mt\"), [\"N_neutrinos_reco\"]] = 3\n df.loc[(df[\"channel_reco\"] == \"et\"), [\"N_neutrinos_reco\"]] = 3\n df.loc[(df[\"channel_reco\"] == \"mm\"), [\"N_neutrinos_reco\"]] = 4\n df.loc[(df[\"channel_reco\"] == \"em\"), 
[\"N_neutrinos_reco\"]] = 4\n df.loc[(df[\"channel_reco\"] == \"ee\"), [\"N_neutrinos_reco\"]] = 4\n\n if \"tau1_px_reco\" in inputs:\n for ptc in [\"tau1\", \"tau2\", \"jet1\", \"jet2\", \"remaining_jets\", \"MET\", \"PuppiMET\"]:\n if \"{}_eta_reco\".format(ptc) in df.keys():\n df[\"{}_pz_reco\".format(ptc)] = df[\"{}_pt_reco\".format(ptc)] * np.sinh(df[\"{}_eta_reco\".format(ptc)])\n df[\"{}_px_reco\".format(ptc)] = df[\"{}_pt_reco\".format(ptc)] * np.cos(df[\"{}_phi_reco\".format(ptc)])\n df[\"{}_py_reco\".format(ptc)] = df[\"{}_pt_reco\".format(ptc)] * np.sin(df[\"{}_phi_reco\".format(ptc)])\n\n for leg in [\"leg1\", \"leg2\"]:\n for variable in [\"pt\", \"eta\", \"phi\"]:\n for subsample in [\"is_train\", \"is_valid\", \"is_test\"]:\n if \"{leg}_{variable}_gen\".format(leg=leg, variable=variable) in inputs:\n df.loc[(df[\"{leg}_{variable}_gen\".format(leg=leg, variable=variable)] == -10), [subsample]] = False\n \n if model_type == None:\n df[\"predictions\"] = df[model_name]\n elif model_type == 'XGBoost':\n from xgboost import DMatrix\n df[\"predictions\"] = loaded_model.predict(DMatrix(data = np.r_[df[inputs]], feature_names=inputs))\n else:\n df[\"predictions\"] = loaded_model.predict(df[inputs])\n return df\n\nfrom scipy.optimize import curve_fit\n\ndef gaus(x,a,x0,sigma):\n return a*np.exp(-(x-x0)**2/(2*sigma**2))\n\ndef make_gaussian_fit(ax_hist):\n x, y = (ax_hist[1][1:]+ax_hist[1][:-1])/2, ax_hist[0]\n popt,pcov = curve_fit(gaus, x, y, p0=[1,1,1])\n return x, popt\n\ndef tester(df, channel, model_name, min_mass, max_mass, prefix = '', target = None, **kwargs):\n\n df1 = macros.filter_channel(df, channel)\n \n medians_model = []\n CL68s_model_up = []\n CL68s_model_do = []\n CL95s_model_up = []\n CL95s_model_do = []\n xpos = []\n \n mHcuts = np.arange(min_mass, max_mass, 10) # [.200, .350]\n mHranges = [[min_mass, mHcuts[0]]]\n for mHcut in mHcuts[1:]:\n mHranges.append([mHranges[-1][1], mHcut])\n mHranges.append([mHranges[-1][1], max_mass])\n for mHrange in mHranges:\n mHrange[0] = np.round(mHrange[0],3)\n mHrange[1] = np.round(mHrange[1],3)\n \n df2 = df1.loc[(df1[target] >= mHrange[0]) & (df1[target] < mHrange[1])]\n \n predictions = np.r_[df2[\"predictions\"]]\n if len(predictions) == 0:\n continue\n\n xpos.append((mHrange[1]+mHrange[0])/2)\n\n mHs = np.r_[df2[target]]\n values_model = predictions/mHs\n \n values_model = [v for v in values_model]\n values_model.sort()\n\n try:\n medians_model.append(values_model[int(len(values_model)/2)])\n except:\n import pdb; pdb.set_trace()\n\n above_model = [v for v in values_model if v >= medians_model[-1]]\n below_model = [v for v in values_model if v <= medians_model[-1]]\n\n above_model.sort()\n below_model.sort(reverse = True)\n\n CL68s_model_up.append(above_model[int(0.68 * len(above_model))])\n CL68s_model_do.append(below_model[int(0.68 * len(below_model))])\n CL95s_model_up.append(above_model[int(0.95 * len(above_model))])\n CL95s_model_do.append(below_model[int(0.95 * len(below_model))])\n\n median_diff = 0\n CL68_width = 0\n CL95_width = 0\n CL68_calibr_width = 0\n CL95_calibr_width = 0\n\n for k in range(len(medians_model)):\n median_diff += abs(medians_model[k] - 1)\n CL68_width += CL68s_model_up[k] - CL68s_model_do[k]\n CL95_width += CL95s_model_up[k] - CL95s_model_do[k]\n CL68_calibr_width += (CL68s_model_up[k] - CL68s_model_do[k])/medians_model[k]\n CL95_calibr_width += (CL95s_model_up[k] - CL95s_model_do[k])/medians_model[k]\n\n df2= df1.loc[(df1[target] >= min_mass) & (df1[target] < max_mass)]\n N = len(df2)\n \n 
median_diff *= 1./len(medians_model)\n CL68_width *= 1./len(medians_model)\n CL95_width *= 1./len(medians_model)\n CL68_calibr_width *= 1./len(medians_model)\n CL95_calibr_width *= 1./len(medians_model)\n \n y_true = df2[target].array\n y_pred = df2[\"predictions\"].array\n \n mse = ((y_pred-y_true)**2).mean()\n mae = (np.abs(y_pred-y_true)).mean()\n mape = (np.abs(y_pred-y_true)/y_true).mean() * 100\n \n return median_diff, CL68_width, CL95_width, CL68_calibr_width, CL95_calibr_width, mse, mae, mape, N, len(medians_model)\n\ndef create_scores_database(args):\n command = \"find {} -type f -name \\*.perfs\".format(args.basedir)\n for filter_to_apply in args.filters_to_match.split(','):\n command = \"{} | grep {}\".format(command, filter_to_apply)\n for filter_to_apply in args.filters_to_not_match.split(','):\n command = \"{} | grep -ve {}\".format(command, filter_to_apply)\n\n perf_files = os.popen(command).readlines()\n perf_files = [f[:-1] for f in perf_files]\n\n all_data = []\n print(\"Processing on {} perf files...\".format(len(perf_files)))\n for model_perf in perf_files:\n data = {}\n\n data[\"file\"] = model_perf\n data[\"type\"] = \"XGB\" if \"xgboosts\" in model_perf else \"DNN\"\n data[\"model_inputs\"] = model_perf.split(\"/\")[-2]\n data[\"training_dataset\"] = model_perf.split(\"/\")[-3]\n model_name = model_perf.split(\"/\")[-1].replace(\".perfs\", \"\")\n if data[\"type\"] == \"XGB\":\n data[\"max_depth\"] = int(model_name.split(\"-\")[-15])\n data[\"eta\"] = float(model_name.split(\"-\")[-13])\n data[\"n_estimators\"] = int(model_name.split(\"-\")[-11])\n data[\"early_stopping_rounds\"] = int(model_name.split(\"-\")[-9])\n data[\"gamma\"] = float(model_name.split(\"-\")[-7])\n data[\"min_child_weight\"] = float(model_name.split(\"-\")[-5])\n data[\"eval\"] = model_name.split(\"-\")[-3]\n data[\"loss\"] = model_name.split(\"-\")[-1]\n elif data[\"type\"] == \"DNN\":\n is_bottleneck = (\"-bottleneck\" == model_name.split(\"-\")[-1])\n data[\"bottleneck\"] = is_bottleneck\n if is_bottleneck:\n model_name.replace('-bottleneck', '')\n data[\"Nneurons\"] = int(model_name.split(\"-\")[-2])\n data[\"Nlayers\"] = int(model_name.split(\"-\")[-4])\n data[\"loss\"] = model_name.split(\"-\")[-8]\n data[\"optimizer\"] = model_name.split(\"-\")[-7]\n data[\"w_init_mode\"] = model_name.split(\"-\")[-6]\n data[\"activation\"] = model_name.split(\"-\")[-11]\n if \"ADAM_glorot_uniform\" in model_perf:\n data[\"optimizer\"] = \"Adam\"\n data[\"w_init_mode\"] = \"gu\"\n \n for region in [\"low\", \"medium\", \"high\", \"full\"]:\n for perf in [\"median_diff\", \"CL68_width\", \"CL95_width\", \"CL68_calibr_width\", \"CL95_calibr_width\", \"mse\", \"mae\", \"mape\"]:\n key = \"_\".join([region, perf])\n key_for_data = \"_\".join([region, perf])\n key_for_data = key_for_data.replace(\"CL68\", \"1sig\")\n key_for_data = key_for_data.replace(\"CL95\", \"2sig\")\n try:\n data[key_for_data] = float(os.popen('grep {} {}'.format(key, model_perf)).readlines()[0][:-1].split(\" \")[1])\n except:\n print(\"{} not found for {}\".format(key, model_perf))\n\n all_data.append(data)\n\n print(\"Building DataFrame...\")\n df = pd.DataFrame(all_data)\n print(\"DataFrame created, saving...\")\n df.to_hdf(\"{}/{}.h5\".format(args.database_path, args.database_name), key='df')\n" ]
[ [ "pandas.read_hdf", "numpy.abs", "numpy.arange", "pandas.DataFrame", "numpy.round", "numpy.exp", "scipy.optimize.curve_fit" ] ]
EugMJang/Covid-20
[ "e56cb32bb2470c309cb983e28e8da3de5a2503c8" ]
[ "backend/nation_state/nation_state_data_plotter.py" ]
[ "import requests\nimport matplotlib.pyplot as plt\n\nurls = [\"https://api.covidtracking.com/v1/us/daily.json\", \"https://api.covidtracking.com/v1/states/il/daily.json\"]\n\n\ndef get_nation_state_data(i):\n r = requests.get(urls[i])\n j = r.json()\n\n times = []\n positives = []\n totals = []\n percentages = []\n j.sort(key=lambda x: x[\"date\"])\n for d in j:\n date_string = str(d[\"date\"])\n times.append(date_string[:4] + \"-\" + date_string[4:6] + \"-\" + date_string[6:])\n positives.append(d[\"positive\"])\n totals.append(d[\"totalTestResults\"])\n percentages.append(d[\"positive\"] / d[\"totalTestResults\"])\n\n return {\"times\": times, \"positives\": positives, \"totals\": totals, \"percentages\": percentages}\n\n\nfor i, url in enumerate(urls):\n r = requests.get(url)\n j = r.json()\n\n times = []\n positives = []\n totals = []\n percentages = []\n j.sort(key=lambda x: x[\"date\"])\n for d in j:\n date_string = str(d[\"date\"])\n times.append(date_string[:4] + \"-\" + date_string[4:6] + \"-\" + date_string[6:])\n positives.append(d[\"positive\"])\n totals.append(d[\"totalTestResults\"])\n percentages.append(d[\"positive\"] / d[\"totalTestResults\"])\n\n plt.figure(figsize=(18, 10), dpi=180)\n plt.title(\"Percentage of tests that were positive\")\n plt.plot(times, percentages)\n plt.xticks(times[::20], visible=True, rotation=\"horizontal\")\n if i == 0:\n plt.savefig(\"backend/plt/nationwide_%.png\")\n elif i == 1:\n plt.savefig(\"backend/plt/illinois_%.png\")\n\n plt.figure(figsize=(18, 10), dpi=180)\n plt.title(\"Tests and positive cases\")\n plt.plot(times, positives, label=\"Positive cases\")\n plt.plot(times, totals, label=\"Number of people tested\")\n plt.legend()\n plt.xticks(times[::20], visible=True, rotation=\"horizontal\")\n if i == 0:\n plt.savefig(\"backend/plt/nationwide_cases.png\")\n elif i == 1:\n plt.savefig(\"backend/plt/illinois_cases.png\")\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
MauroSilvaPinto/A-personalized-and-evolutionary-algorithm-for-interpretable-EEG-epilepsy-seizure-prediction
[ "3bec2f06cfe0e2aff13e63c83ebc97135c079408" ]
[ "Code/Evolutionary Algorithm/Filter.py" ]
[ "\"\"\"\nFilter class.\n\nFilter class constructs the second-level features (hyper-features)\nby windowing the first-level ones. It performs this feature extraction\nchronologically\n\n\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.signal import find_peaks\nfrom Feature import Feature\nimport networkx as nx\n\nclass Filter:\n \n # creates a feature with the decoded phenotype, that is\n # calculates the pre-ictal time and the time-delay entries\n # of the filter instead of event moments\n # then calculates the filter size\n # the structures:\n # pre_ictal time\n # filter size\n # filter events\n def createFilter(decoded_phenotype):\n filter=Filter.buildFilterChronology(decoded_phenotype)\n filter=Filter.addFilterLengthToFilter(filter)\n filter=Filter.addPreIctalTimeToFilter(filter,Filter.getPreIctalTime(decoded_phenotype))\n return filter\n\n # decomposes the filter into pre_ictal time, filter_size and filter_components \n def decomposeFilter(filter):\n preictal_time=int(filter[0].split(\"__\")[1])\n filter_size=int(filter[1].split(\"__\")[1])\n filter_components=filter[2:]\n \n return preictal_time, filter_size, filter_components\n \n # calculates the preictal time of the decoded phenotype, that is\n # the period more close to a seizure of the phenotype features \n def getPreIctalTime(decoded_phenotype): \n return int(min(Filter.getListOfTimeEvents(decoded_phenotype)))\n \n # retrieves the time moments of the events as a list\n def getListOfTimeEvents(decoded_phenotype):\n times=[]\n for feature in decoded_phenotype:\n times.append(feature.split(\"__\")[2])\n return times\n \n # retrieves the first event of the filter, that is, the moment more distant\n # to a seizure\n def getFirstEvent(decoded_phenotype):\n return int(max(Filter.getListOfTimeEvents(decoded_phenotype)))\n \n # builds the filter with the decoded phenotype, that is\n # updates the phenotype with the chronology of the filter\n def buildFilterChronology(decoded_phenotype):\n filter=[]\n for feature in decoded_phenotype:\n filter_component=(Filter.getPreProcessingIndex(feature) + \n \"__\" + Filter.getFeatureOperation(feature) +\n \"__\" + str(Filter.getFilterEventMoment(feature,decoded_phenotype)) +\n \"__\" + str(Filter.getFeatureEventWindow(feature)))\n \n filter.append(filter_component)\n return filter\n \n \n # adds the preictal time to the filter as the first line\n def addPreIctalTimeToFilter(filter,pre_ictal):\n filter.insert(0,\"pre_ictal__\"+str(pre_ictal))\n return filter\n \n # adds the filter length to the filter as the first line\n # there was the need to had the first event largest window since it will\n # also contribute to the filter size\n def addFilterLengthToFilter(filter):\n filter_length=0\n for feature_event in filter:\n if ((Filter.getFeatureEventMoment(feature_event) + \n Filter.getFeatureEventWindow(feature_event)) > filter_length):\n \n filter_length=(Filter.getFeatureEventMoment(feature_event) + \n Filter.getFeatureEventWindow(feature_event))\n \n filter.insert(0,\"filter_size__\"+str(filter_length))\n return filter\n \n \n \n # adds to the filter length the window event of the first event\n def addToFilterLargestWindow(filter_length, decoded_phenotype):\n largest_window=Filter.findLargestWindowFirstEvent(decoded_phenotype)\n return filter_length+largest_window\n \n # finds the largest window of the first event\n # that is, analysis all window events of the first event\n # and selects the highest\n def findLargestWindowFirstEvent(decoded_phenotype):\n 
first_event=Filter.getFirstEvent(decoded_phenotype)\n largest_window=0\n for feature_event in decoded_phenotype:\n if Filter.getFeatureEventMoment(feature_event)==first_event:\n if Filter.getFeatureEventWindow(feature_event)>largest_window:\n largest_window=Filter.getFeatureEventWindow(feature_event)\n return largest_window\n \n # retrieves the event moment in the filter of a certain feature for a given\n # decoded phenotype\n def getFilterEventMoment(feature,decoded_phenotype):\n return (Filter.getFirstEvent(decoded_phenotype)-Filter.getFeatureEventMoment(feature))\n \n #retrieves the feature index of the preprocessed data \n def getPreProcessingIndex(feature):\n return feature.split(\"__\")[0]\n \n # retrieves the mathematical operation of the feature\n def getFeatureOperation(feature):\n return feature.split(\"__\")[1]\n \n # retrieves the time moment of a certain feature \n def getFeatureEventMoment(feature):\n return int(feature.split(\"__\")[2])\n \n # retrieves the window-scale of a certain feature \n def getFeatureEventWindow(feature):\n return int(feature.split(\"__\")[3])\n \n # retrieves the number of filter components, that is, the number of features\n def getNumberOfFilterComponents(filter_components):\n return len(filter_components)\n \n # gets the step size of the filter in minutes in the iterictal part\n def getStepFilterInMinutesInterictal():\n return 1\n \n # gets the step size of the filter in minutes in the preictal part\n def getStepFilterInMinutesPreictal():\n return 1\n \n # performs the mathematical mean operation\n def performMeanOperation(data,starting_index,size_window):\n return np.mean(data[starting_index:starting_index+size_window])\n \n # performs the mathematical median operation\n def performMedianOperation(data,starting_index,size_window):\n return np.median(data[starting_index:starting_index+size_window])\n \n # performs the mathematical variance operation\n def performVarianceOperation(data,starting_index,size_window):\n return np.var(data[starting_index:starting_index+size_window])\n \n # performs the mathematical integral (area under the curve) operation\n # using trapz rule\n def performIntegralOperation(data,starting_index,size_window):\n return np.trapz(data[starting_index:starting_index+size_window])\n \n # performs a measure of periodism, regarding the mean distance of the locations\n # of peaks\n def performLocPeaksMeanOperation(data,starting_index,size_window):\n peaks,__=find_peaks(data[starting_index:starting_index+size_window])\n return np.mean(np.diff(peaks))\n \n # performs a measure of variance of periodism, regarding the variance of the\n # distance of the location of peaks\n def performLocPeaksVarianceOperation(data,starting_index,size_window):\n peaks,__=find_peaks(data[starting_index:starting_index+size_window])\n return np.var(np.diff(peaks))\n \n # for a chunk of data and for a filter component, calculate the feature in\n # question represented in the filter component\n def calculateFeature(data, component):\n starting_index=Filter.getFeatureEventMoment(component)\n size_window=Filter.getFeatureEventWindow(component)\n operation=Filter.getFeatureOperation(component)\n \n if operation == \"mean\":\n value=Filter.performMeanOperation(data,starting_index,size_window)\n elif operation == \"median\":\n value=Filter.performMedianOperation(data,starting_index,size_window)\n elif operation == \"variance\": \n value=Filter.performVarianceOperation(data,starting_index,size_window)\n elif operation == \"integral\":\n 
value=Filter.performIntegralOperation(data,starting_index,size_window)\n #elif operation == \"loc_pks_mean\":\n #value=Filter.performLocPeaksMeanOperation(data,starting_index,size_window)\n #elif operation == \"lock_pks_var\":\n #value=Filter.performLocPeaksVarianceOperation(data,starting_index,size_window)\n \n return value\n \n # calculate the label regarding the considered pre-ictal time, if it is\n # inter-ictal or pre-ictal\n def calculateLabel(index,seizure_size,filter_size,pre_ictal):\n if (index + filter_size) < (seizure_size - pre_ictal):\n return 0\n else:\n return 1\n \n # performed a moving average filter with the provided size\n def movingAverageFilter(filter_size):\n b=np.ones(filter_size)\n return Filter.normalizeFilter(b) \n \n # performed a moving average with linear decay filter with the provided size\n def movingLinearDecayFilter(filter_size):\n b=np.linspace(1,filter_size,filter_size)\n return Filter.normalizeFilter(b) \n \n # performed a moving average with exponential decay filter with the provided size\n def movingExponentialDecayFilter(filter_size):\n b=np.flip(-np.linspace(0,filter_size,filter_size))\n return Filter.normalizeFilter(b) \n \n # normalize the filter in order for the sum of its contributions is equal to 1\n def normalizeFilter(filter):\n return filter/np.sum(filter)\n \n # after the first classification step, data is post-processed in order to\n # obtain a new one, concerning a provided step size \n # since there may be more than one seizure, one must make a division between\n # seizures for not making a filter after the end of a seizure and the beginning\n # of another which don't have anything to do with this\n def getPostProcessingData(scores,labels,step):\n \n scores=np.array(scores)\n labels=np.array(labels)\n \n features=[]\n new_labels=[]\n \n indexes_end_seizure=np.where(np.diff(labels)==-1)[0]+1 \n \n #that is, if there is more than one seizure\n if len(indexes_end_seizure)>0:\n # first seizure\n for i in range(0,indexes_end_seizure[0]-step):\n features.append(np.reshape(scores[i:i+step],step))\n new_labels.append(np.clip(np.sum(labels[i:i+step]),0,1))\n \n #for middle seizures\n for i in range(0,len(indexes_end_seizure)-1):\n for j in range(indexes_end_seizure[i],indexes_end_seizure[i+1]-step):\n features.append(np.reshape(scores[j:j+step],step))\n new_labels.append(np.clip(np.sum(labels[j:j+step]),0,1))\n \n #for last seizure\n for i in range(indexes_end_seizure[-1],len(labels)-step):\n features.append(np.reshape(scores[i:i+step],step))\n new_labels.append(np.clip(np.sum(labels[i:i+step]),0,1)) \n \n # it there is only one seizure \n else:\n for i in range(0,len(labels)-step):\n features.append(np.reshape(scores[i:i+step],step))\n new_labels.append(np.clip(np.sum(labels[i:i+step]),0,1))\n \n return (np.reshape(np.array(features),[len(features),step]),\n np.array(new_labels))\n \n \n # converts the provided amount of minutes of hours by dividing by 60 \n def convertMinutesInHours(minutes):\n return minutes/60\n \n # calculates the number of triggered alarms\n def calculateNumberOfAlarms(predicted):\n return len(np.where(np.diff(predicted)==1)[0])\n \n # calculates the number of existing seizures\n def calculateNumberOfSeizures(labels):\n return len(np.where(np.diff(labels)==1)[0])\n \n \n def calculateNumberOfFalseAlarmsSurrogate(labels,predicted):\n f_alarms=0\n for i in range(0,len(labels)):\n if labels[i]==0 and predicted[i]==1:\n f_alarms=f_alarms+1\n return f_alarms\n \n # calculates the number of false alarms´\n # first it calculates 
the number of false alarms in the first seizure\n # then in the remaining ones\n def calculateNumberOfFalseAlarms(labels,predicted):\n indexes_ending_interictal=np.where(np.diff(labels)==1)[0]+1\n indexes_ending_preictal=np.where(np.diff(labels)==(-1))[0]+1\n\n first_seizure_predicted=predicted[0:indexes_ending_interictal[0]]\n first_seizure=len(np.where(np.diff(first_seizure_predicted)==1)[0])\n \n # if a seizure is immediatelly triggered, the diff function will have no effect\n # we must verify manually the first value\n if (first_seizure_predicted[0]==1):\n first_seizure=first_seizure+1\n \n other_seizures=0\n for i in range(1,len(indexes_ending_interictal)):\n other_seizures_predicted=predicted[indexes_ending_preictal[i-1]:indexes_ending_interictal[i]]\n other_seizures=other_seizures+len(np.where(np.diff(other_seizures_predicted)==1)[0])\n \n # if a seizure is immediatelly triggered, the diff function will have no effect\n # we must verify manually the first value\n if (len(np.where(np.diff(other_seizures_predicted)==1)[0]) ==0 and \n other_seizures_predicted[0]==1):\n other_seizures=other_seizures+1\n \n return other_seizures+first_seizure\n \n \n \n \n # calculates the number of the well triggered alarms\n # first it calculates the number of triggered alarms in all seizures except\n # the last\n # then it calculates it in the last seizure\n def calculateNumberOfTriggeredSeizures(labels,predicted):\n indexes_ending_interictal=np.where(np.diff(labels)==1)[0]+1\n indexes_ending_preictal=np.where(np.diff(labels)==(-1))[0]+1\n \n other_seizures=0\n for i in range(0,len(indexes_ending_preictal)):\n other_seizures_predicted=predicted[indexes_ending_interictal[i]:indexes_ending_preictal[i]]\n if 1 in other_seizures_predicted:\n other_seizures=other_seizures+1\n \n last_seizure_predicted=predicted[indexes_ending_interictal[-1]-1:]\n if 1 in last_seizure_predicted:\n last_seizure=1\n else:\n last_seizure=0\n \n return other_seizures+last_seizure\n \n def calculateNumberOfTriggeredSeizuresSurrogate(labels,predicted):\n triggered=0\n for i in range(0,len(predicted)):\n if labels[i]==1 and predicted[i]==1:\n triggered=1\n \n return triggered\n \n def calculateDistanceBetweenFilters(filter_a,filter_b):\n filter_a=Filter.assignFilterOrder(filter_a,filter_b)\n events_distances=[]\n for i in range (2,len(filter_a)):\n pre_ictal_a=int(filter_a[0].split(\"__\")[1])\n pre_ictal_b=int(filter_b[0].split(\"__\")[1])\n events_distances.append(Filter.calculateDistanceBetweenComponents(filter_a[i],filter_b[i],\n pre_ictal_a, pre_ictal_b))\n \n events_distances=np.array(events_distances)\n return (np.sum(events_distances))\n \n \n \n \n def assignFilterOrder(filter_a,filter_b):\n used_indexes=[]\n pre_ictal_a=int(filter_a[0].split(\"__\")[1])\n pre_ictal_b=int(filter_b[0].split(\"__\")[1])\n \n for i in range(2,len(filter_a)):\n component_distances=[]\n for j in range(2,len(filter_b)):\n component_distances.append(Filter.calculateDistanceBetweenComponents(filter_a[i],filter_b[j],\n pre_ictal_a, pre_ictal_b))\n \n component_distances=np.array(component_distances)\n indexes_sorted_distances=np.argsort(component_distances)\n \n for j in range(0,len(indexes_sorted_distances)):\n if (indexes_sorted_distances[j]+2) in used_indexes:\n continue\n else:\n used_indexes.append(indexes_sorted_distances[j]+2)\n break\n \n new_filter=[]\n new_filter.append(filter_a[0])\n new_filter.append(filter_a[1])\n for i in range(2,len(filter_a)):\n new_filter.append(filter_a[used_indexes[i-2]])\n \n return new_filter\n \n \n \n \n\n 
def calculateDistanceBetweenComponents(component_a,component_b, pre_ictal_a, pre_ictal_b):\n component_differences=[]\n \n component_differences.append(Filter.calculateMathematicalOperatorDistance(component_a,component_b)) \n component_differences.append(Filter.calculateElectrodeDistance(component_a,component_b))\n component_differences.append(Filter.calculateFilterTimeDistance(component_a,component_b,pre_ictal_a, pre_ictal_b)) \n component_differences.append(Filter.calculateCharacteristicDistance(component_a,component_b)) \n component_differences.append(Filter.calculateWindowLengthDistance(component_a,component_b))\n \n component_differences=np.array(component_differences)\n return (np.sum(component_differences))\n \n \n \n def calculateMathematicalOperatorDistance(component_a,component_b):\n operator_a=component_a.split('__')[1]\n operator_b=component_b.split('__')[1]\n \n if operator_a==operator_b:\n return 0\n else:\n return 1\n \n \n def calculateElectrodeDistance(component_a,component_b):\n electrode_a=component_a.split('_')[0]\n electrode_b=component_b.split('_')[0]\n \n brain_graph=Feature.getElectrodesGraph()\n paths=list(nx.all_shortest_paths(brain_graph,\n source=electrode_a,\n target=electrode_b)) \n path=paths[np.random.choice(np.arange(0,len(paths)))]\n \n return (len(path)-1)\n \n def calculateWindowLengthDistance(component_a,component_b):\n window_a=component_a.split('__')[-1]\n window_b=component_b.split('__')[-1]\n\n window_length_range=Feature.getWindowLengthRange()\n window_a_index=np.where(window_length_range==int(window_a))[0][0]\n window_b_index=np.where(window_length_range==int(window_b))[0][0]\n \n return abs(window_a_index-window_b_index)\n \n \n def calculateFilterTimeDistance(component_a, component_b,pre_ictal_a, pre_ictal_b):\n time_a=int(component_a.split('__')[-2])+pre_ictal_a\n time_b=int(component_b.split('__')[-2])+pre_ictal_b\n \n preictal_time_range=Feature.getPreIctalRange()\n time_a_index=np.where(preictal_time_range==int(time_a))[0][0]\n time_b_index=np.where(preictal_time_range==int(time_b))[0][0]\n return abs(time_a_index-time_b_index)\n \n \n def isCharacteristicWave(characteristic):\n return (characteristic in Feature.getWavesList())\n \n \n def calculateCharacteristicDistance(component_a, component_b):\n characteristic_a=component_a.split('__')[0].split('_',1)[-1]\n characteristic_b=component_b.split('__')[0].split('_',1)[-1]\n \n if (Filter.isCharacteristicWave(characteristic_a) and \n Filter.isCharacteristicWave(characteristic_b)):\n \n index_a=Feature.getWavesList().index(characteristic_a)\n index_b=Feature.getWavesList().index(characteristic_b)\n \n return abs(index_a-index_b)\n \n elif (not Filter.isCharacteristicWave(characteristic_a) and \n not Filter.isCharacteristicWave(characteristic_b)):\n if characteristic_a==characteristic_b:\n return 0\n else:\n return 1\n \n else:\n return 1\n \n \n \n \n \n \n \n " ]
[ [ "scipy.signal.find_peaks", "numpy.sum", "numpy.linspace", "numpy.reshape", "numpy.median", "numpy.ones", "numpy.mean", "numpy.diff", "numpy.var", "numpy.argsort", "numpy.array", "numpy.trapz" ] ]
LElgueddari/pisap
[ "ddd9f9f02dcd629b5615fa571ac7795c2d5e9727" ]
[ "pysap/base/loaders/nifti.py" ]
[ "# -*- coding: utf-8 -*-\n##########################################################################\n# pySAP - Copyright (C) CEA, 2017 - 2018\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n# System import\nimport nibabel\nimport numpy\n\n# Package import\nfrom .loader_base import LoaderBase\nfrom pysap.base.image import Image\n\n\nclass NIFTI(LoaderBase):\n \"\"\" Define the Nifti loader.\n \"\"\"\n allowed_extensions = [\".nii\", \".nii.gz\"]\n\n def load(self, path):\n \"\"\" A method that load the image data and associated metadata.\n\n Parameters\n ----------\n path: str\n the path to the image to be loaded.\n\n Returns\n -------\n image: Image\n the loaded image.\n \"\"\"\n _image = nibabel.load(path)\n return Image(spacing=_image.header.get_zooms(),\n data_type=\"scalar\",\n metadata={\"path\": path},\n data=_image.get_data())\n\n def save(self, image, outpath):\n \"\"\" A method that save the image data and associated metadata.\n\n Parameters\n ----------\n image: Image\n the image to be saved.\n outpath: str\n the path where the the image will be saved.\n \"\"\"\n diag = (1. / image.spacing).tolist() + [1]\n _image = nibabel.Nifti1Image(image.data, numpy.diag(diag))\n nibabel.save(_image, outpath)\n" ]
[ [ "numpy.diag" ] ]
gandie/pyAudioAnalysis
[ "1449d667806161b5dace3612cbeceec9be93d843" ]
[ "pyAudioAnalysis/analyzeMovieSound.py" ]
[ "import os, sys, shutil, glob, numpy, csv, cPickle\nimport scipy.io.wavfile as wavfile\nimport audioBasicIO\nimport audioTrainTest as aT\nimport audioSegmentation as aS\nimport matplotlib.pyplot as plt\nimport scipy.spatial.distance\nminDuration = 7;\n\ndef classifyFolderWrapper(inputFolder, modelType, modelName, outputMode=False):\n\tif not os.path.isfile(modelName):\n\t\traise Exception(\"Input modelName not found!\")\n\n\tif modelType=='svm':\n\t\t[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, compute_beat] = aT.load_model(modelName)\n\telif modelType=='knn':\n\t\t[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, compute_beat] = aT.load_model_knn(modelName)\n\n\tPsAll = numpy.zeros((len(classNames), ))\n\n\tfiles = \"*.wav\"\n\tif os.path.isdir(inputFolder):\n\t\tstrFilePattern = os.path.join(inputFolder, files)\n\telse:\n\t\tstrFilePattern = inputFolder + files\n\n\twavFilesList = []\n\twavFilesList.extend(glob.glob(strFilePattern))\n\twavFilesList = sorted(wavFilesList)\n\tif len(wavFilesList)==0:\n\t\tprint(\"No WAV files found!\")\n\t\treturn\n\n\tResults = []\n\tfor wavFile in wavFilesList:\n\t\t[Fs, x] = audioBasicIO.readAudioFile(wavFile)\n\t\tsignalLength = x.shape[0] / float(Fs)\n\t\t[Result, P, classNames] = aT.file_classification(wavFile, modelName, modelType)\n\t\tPsAll += (numpy.array(P) * signalLength)\n\t\tResult = int(Result)\n\t\tResults.append(Result)\n\t\tif outputMode:\n\t\t\tprint(\"{0:s}\\t{1:s}\".format(wavFile,classNames[Result]))\n\tResults = numpy.array(Results)\n\n\t# print distribution of classes:\n\t[Histogram, _] = numpy.histogram(Results, bins=numpy.arange(len(classNames)+1))\n\tif outputMode:\n\t\tfor i,h in enumerate(Histogram):\n\t\t\tprint(\"{0:20s}\\t\\t{1:d}\".format(classNames[i], h))\n\tPsAll = PsAll / numpy.sum(PsAll)\n\n\n\tif outputMode:\n\t\tfig = plt.figure()\n\t\tax = fig.add_subplot(111)\n\t\tplt.title(\"Classes percentage \" + inputFolder.replace('Segments',''))\n\t\tax.axis((0, len(classNames)+1, 0, 1))\n\t\tax.set_xticks(numpy.array(range(len(classNames)+1)))\n\t\tax.set_xticklabels([\" \"] + classNames)\n\t\tax.bar(numpy.array(range(len(classNames)))+0.5, PsAll)\n\t\tplt.show()\n\treturn classNames, PsAll\n\ndef getMusicSegmentsFromFile(inputFile):\n\tmodelType = \"svm\"\n\tmodelName = \"data/svmMovies8classes\"\n\n\tdirOutput = inputFile[0:-4] + \"_musicSegments\"\n\n\tif os.path.exists(dirOutput) and dirOutput!=\".\":\n\t\tshutil.rmtree(dirOutput)\n\tos.makedirs(dirOutput)\n\n\t[Fs, x] = audioBasicIO.readAudioFile(inputFile)\n\n\tif modelType=='svm':\n\t\t[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, compute_beat] = aT.load_model(modelName)\n\telif modelType=='knn':\n\t\t[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, compute_beat] = aT.load_model_knn(modelName)\n\n\tflagsInd, classNames, acc, CM = aS.mtFileClassification(inputFile, modelName, modelType, plotResults = False, gtFile = \"\")\n\tsegs, classes = aS.flags2segs(flagsInd, mtStep)\n\n\tfor i, s in enumerate(segs):\n\t\tif (classNames[int(classes[i])] == \"Music\") and (s[1] - s[0] >= minDuration):\n\t\t\tstrOut = \"{0:s}{1:.3f}-{2:.3f}.wav\".format(dirOutput+os.sep, s[0], s[1])\n\t\t\twavfile.write( strOut, Fs, x[int(Fs*s[0]):int(Fs*s[1])])\n\ndef analyzeDir(dirPath):\n\tfor i,f in enumerate(glob.glob(dirPath + os.sep + '*.wav')):\t\t\t\t# for each WAV file\n\t\tgetMusicSegmentsFromFile(f)\n\t\t[c, P]= classifyFolderWrapper(f[0:-4] + \"_musicSegments\", \"svm\", \"data/svmMusicGenre8\", False)\n\t\tif 
i==0:\n\t\t\tprint(\"\".ljust(100)+\"\\t\")\n\t\t\tfor C in c:\n\t\t\t\tprint(C.ljust(12)+\"\\t\")\n\t\t\tprint(\"\")\n\t\tprint(f.ljust(100)+\"\\t\")\n\t\tfor p in P:\n\t\t\t\tprint(\"{0:.2f}\".format(p).ljust(12)+\"\\t\")\n\t\tprint(\"\")\n\ndef main(argv):\n\n\tif argv[1]==\"--file\":\n\t\tgetMusicSegmentsFromFile(argv[2])\n\t\tclassifyFolderWrapper(argv[2][0:-4] + \"_musicSegments\", \"svm\", \"data/svmMusicGenre8\", True)\n\n\telif argv[1]==\"--dir\":\n\t\tanalyzeDir(argv[2])\n\n\telif argv[1]==\"--sim\":\n\t\tcsvFile = argv[2]\n\t\tf = []\n\t\tfileNames = []\n\t\twith open(csvFile, 'rb') as csvfile:\n\t\t\tspamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n\t\t\tfor j,row in enumerate(spamreader):\n\t\t\t\tif j>0:\n\t\t\t\t\tftemp = []\n\t\t\t\t\tfor i in range(1,9):\n\t\t\t\t\t\tftemp.append(float(row[i]))\n\t\t\t\t\tf.append(ftemp)\n\t\t\t\t\tR = row[0]\n\t\t\t\t\tII = R.find(\".wav\");\n\t\t\t\t\tfileNames.append(row[0][0:II])\n\t\t\tf = numpy.array(f)\n\n\t\t\tSim = numpy.zeros((f.shape[0], f.shape[0]))\n\t\t\tfor i in range(f.shape[0]):\n\t\t\t\tfor j in range(f.shape[0]):\n\t\t\t\t\tSim[i,j] = scipy.spatial.distance.cdist(numpy.reshape(f[i,:], (f.shape[1],1)).T, numpy.reshape(f[j,:], (f.shape[1],1)).T, 'cosine')\n\n\t\t\tSim1 = numpy.reshape(Sim, (Sim.shape[0]*Sim.shape[1], 1))\n\t\t\tplt.hist(Sim1)\n\t\t\tplt.show()\n\n\t\t\tfo = open(csvFile + \"_simMatrix\", \"wb\")\n\t\t\tcPickle.dump(fileNames, fo, protocol = cPickle.HIGHEST_PROTOCOL)\n\t\t\tcPickle.dump(f, fo, protocol = cPickle.HIGHEST_PROTOCOL)\n\t\t\tcPickle.dump(Sim, fo, protocol = cPickle.HIGHEST_PROTOCOL)\n\t\t\tfo.close()\n\n\telif argv[1]==\"--loadsim\":\n\t\ttry:\n\t\t\tfo = open(argv[2], \"rb\")\n\t\texcept IOError:\n\t\t\t\tprint(\"didn't find file\")\n\t\t\t\treturn\n\t\ttry:\n\t\t\tfileNames \t= cPickle.load(fo)\n\t\t\tf \t\t\t= cPickle.load(fo)\n\t\t\tSim \t\t= cPickle.load(fo)\n\t\texcept:\n\t\t\tfo.close()\n\t\tfo.close()\n\t\tprint(fileNames)\n\t\tSim1 = numpy.reshape(Sim, (Sim.shape[0]*Sim.shape[1], 1))\n\t\tplt.hist(Sim1)\n\t\tplt.show()\n\n\telif argv[1]==\"--audio-event-dir\":\n\t\tfiles = \"*.wav\"\n\t\tinputFolder = argv[2]\n\t\tif os.path.isdir(inputFolder):\n\t\t\tstrFilePattern = os.path.join(inputFolder, files)\n\t\telse:\n\t\t\tstrFilePattern = inputFolder + files\n\n\t\twavFilesList = []\n\t\twavFilesList.extend(glob.glob(strFilePattern))\n\t\twavFilesList = sorted(wavFilesList)\n\t\tfor i,w in enumerate(wavFilesList):\n\t\t\t[flagsInd, classesAll, acc, CM] = aS.mtFileClassification(w, \"data/svmMovies8classes\", \"svm\", False, '')\n\t\t\thistTemp = numpy.zeros( (len(classesAll), ) )\n\t\t\tfor f in flagsInd:\n\t\t\t\thistTemp[int(f)] += 1.0\n\t\t\thistTemp /= histTemp.sum()\n\n\t\t\tif i==0:\n\t\t\t\tprint(\"\".ljust(100)+\"\\t\")\n\t\t\t\tfor C in classesAll:\n\t\t\t\t\tprint(C.ljust(12)+\"\\t\")\n\t\t\t\tprint(\"\")\n\t\t\tprint(w.ljust(100)+\"\\t\")\n\t\t\tfor h in histTemp:\n\t\t\t\tprint(\"{0:.2f}\".format(h).ljust(12)+\"\\t\")\n\t\t\tprint(\"\")\n\n\n\treturn 0\n\nif __name__ == '__main__':\n\tmain(sys.argv)\n" ]
[ [ "numpy.reshape", "matplotlib.pyplot.hist", "numpy.array", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
Stilwell-Git/Randomized-Return-Decomposition
[ "bc804736cbac0ab7ef2eb241d5b17f4a5e2e80a2" ]
[ "algorithm/replay_buffer/atari_buffer/frame_stack_buffer.py" ]
[ "import copy\nimport numpy as np\n\nclass Episode_FrameStack:\n def __init__(self, info):\n self.common_info = [\n 'obs', 'obs_next', 'frame_next',\n 'acts', 'rews', 'done'\n ]\n self.ep = {\n 'obs': [],\n 'acts': [],\n 'rews': [],\n 'done': []\n }\n for key in info.keys():\n if not(key in self.common_info):\n self.ep[key] = []\n self.ep_len = 0\n self.sum_rews = 0.0\n self.frames = info['obs'].shape[-1]\n for i in range(self.frames):\n self.ep['obs'].append(copy.deepcopy(info['obs'][:,:,i]))\n\n def insert(self, info):\n self.ep_len += 1\n self.sum_rews += info['rews']\n self.ep['obs'].append(copy.deepcopy(info['frame_next']))\n self.ep['acts'].append(copy.deepcopy(info['acts']))\n self.ep['rews'].append(copy.deepcopy(info['rews']))\n self.ep['done'].append(copy.deepcopy(info['done']))\n for key in info.keys():\n if not(key in self.common_info):\n self.ep[key].append(copy.deepcopy(info[key]))\n\n def get_obs(self, idx):\n idx += 1\n obs = np.stack(self.ep['obs'][idx:idx+self.frames], axis=-1)\n return obs.astype(np.float32)/255.0\n\n def sample(self):\n idx = np.random.randint(self.ep_len)\n info = {\n 'obs': self.get_obs(idx-1),\n 'obs_next': self.get_obs(idx),\n 'acts': copy.deepcopy(self.ep['acts'][idx]),\n 'rews': [copy.deepcopy(self.ep['rews'][idx])],\n 'done': [copy.deepcopy(self.ep['done'][idx])]\n }\n for key in self.ep.keys():\n if (not(key in self.common_info)) and (not(key in info.keys())):\n info[key] = copy.deepcopy(self.ep[key][idx])\n return info\n\n def sample_ircr(self):\n idx = np.random.randint(self.ep_len)\n info = {\n 'obs': self.get_obs(idx-1),\n 'obs_next': self.get_obs(idx),\n 'acts': copy.deepcopy(self.ep['acts'][idx]),\n 'rews': [self.sum_rews], # critical step of IRCR\n 'done': [copy.deepcopy(self.ep['done'][idx])]\n }\n for key in self.ep.keys():\n if (not(key in self.common_info)) and (not(key in info.keys())):\n info[key] = copy.deepcopy(self.ep[key][idx])\n return info\n\n def sample_rrd(self, sample_size, store_coef=False):\n idx = np.random.choice(self.ep_len, sample_size, replace=(sample_size>self.ep_len))\n info = {\n 'rrd_obs': [],\n 'rrd_obs_next': [],\n 'rrd_acts': [],\n 'rrd_rews': [self.sum_rews/self.ep_len]\n }\n for _ in range(sample_size):\n idx = np.random.randint(self.ep_len)\n info['rrd_obs'].append(self.get_obs(idx-1))\n info['rrd_obs_next'].append(self.get_obs(idx))\n info['rrd_acts'].append(copy.deepcopy(self.ep['acts'][idx]))\n if store_coef:\n if (sample_size<=self.ep_len) and (self.ep_len>1):\n info['rrd_var_coef'] = [1.0-float(sample_size)/self.ep_len]\n else:\n info['rrd_var_coef'] = [1.0 if self.ep_len>1 else 0.0]\n return info\n\nclass ReplayBuffer_FrameStack:\n def __init__(self, args):\n self.args = args\n self.in_head = True\n self.ep_counter = 0\n self.step_counter = 0\n self.buffer_size = self.args.buffer_size\n\n self.ep = []\n self.length = 0\n self.head_idx = 0\n self.ram_idx = []\n\n self.sample_batch = {\n 'dqn': self.sample_batch_dqn,\n 'ircr': self.sample_batch_ircr,\n 'rrd': self.sample_batch_rrd,\n }[args.alg]\n\n def store_transition(self, info):\n if self.in_head:\n new_ep = Episode_FrameStack(info)\n self.ep.append(new_ep)\n self.ep[-1].insert(info)\n self.ram_idx.append(self.ep_counter)\n self.length += 1\n\n if self.length>self.buffer_size:\n del_len = self.ep[0].ep_len\n self.ep.pop(0)\n self.head_idx += 1\n self.length -= del_len\n self.ram_idx = self.ram_idx[del_len:]\n\n self.step_counter += 1\n self.in_head = info['done']\n if info['done']:\n self.ep_counter += 1\n\n def sample_batch_dqn(self, 
batch_size=-1):\n if batch_size==-1: batch_size = self.args.batch_size\n batch = dict(obs=[], obs_next=[], acts=[], rews=[], done=[])\n\n for i in range(batch_size):\n idx = self.ram_idx[np.random.randint(self.length)]-self.head_idx\n info = self.ep[idx].sample()\n for key in info.keys():\n batch[key].append(info[key])\n\n return batch\n\n def sample_batch_ircr(self, batch_size=-1):\n if batch_size==-1: batch_size = self.args.batch_size\n batch = dict(obs=[], obs_next=[], acts=[], rews=[], done=[])\n\n for i in range(batch_size):\n idx = self.ram_idx[np.random.randint(self.length)]-self.head_idx\n info = self.ep[idx].sample_ircr() # critical step of IRCR\n for key in info.keys():\n batch[key].append(info[key])\n\n return batch\n\n def sample_batch_rrd(self, batch_size=-1, rrd_batch_size=-1, rrd_sample_size=-1):\n if batch_size==-1: batch_size = self.args.batch_size\n if rrd_batch_size==-1: rrd_batch_size = self.args.rrd_batch_size\n if rrd_sample_size==-1: rrd_sample_size = self.args.rrd_sample_size\n batch = dict(obs=[], obs_next=[], acts=[], rews=[], done=[], rrd_obs=[], rrd_obs_next=[], rrd_acts=[], rrd_rews=[])\n if self.args.rrd_bias_correction:\n batch['rrd_var_coef'] = []\n\n for i in range(batch_size):\n idx = self.ram_idx[np.random.randint(self.length)]-self.head_idx\n info = self.ep[idx].sample()\n for key in info.keys():\n batch[key].append(info[key])\n\n for i in range(rrd_batch_size//rrd_sample_size):\n idx = self.ram_idx[np.random.randint(self.length)]-self.head_idx\n info = self.ep[idx].sample_rrd(rrd_sample_size, store_coef=self.args.rrd_bias_correction)\n for key in info.keys():\n batch[key].append(info[key])\n\n return batch\n" ]
[ [ "numpy.random.choice", "numpy.stack", "numpy.random.randint" ] ]
bptripp/it-cnn
[ "aa8b9fc86de7692a02b2fa51ed5398798a63204f" ]
[ "tuning/position.py" ]
[ "__author__ = 'bptripp'\n\nimport cPickle as pickle\nfrom scipy.optimize import curve_fit\nimport numpy as np\nimport matplotlib\nmatplotlib.rcParams['xtick.labelsize'] = 16\nmatplotlib.rcParams['ytick.labelsize'] = 16\nimport matplotlib.pyplot as plt\nfrom cnn_stimuli import get_image_file_list\nfrom alexnet import preprocess, load_net, load_vgg\nfrom orientation import smooth\n\noffsets = np.linspace(-75, 75, 150/5+1, dtype=int)\n\n\ndef get_centre_of_mass(y):\n \"\"\"\n Centre of mass of a tuning function as in Op De Beeck et al. (2000), including\n only points >50% of peak.\n \"\"\"\n ind = y > .5*np.max(y)\n masked = np.zeros(y.shape)\n masked[ind] = y[ind]\n return np.sum(offsets*masked) / np.sum(masked)\n\n\ndef get_width(y):\n \"\"\"\n Width of a tuning function as in Op De Beeck et al. (2000); distance between where it falls\n to 50% of peak on each side.\n \"\"\"\n max_ind = np.argmax(y)\n below_threshold = y < .5*np.max(y)\n # print(below_threshold)\n # low_and_left = np.logical_and(range(len(y)) < max_ind, below_threshold)\n # low_and_right = np.logical_and(range(len(y)) > max_ind, below_threshold)\n #\n # if max(low_and_left):\n # low_ind = np.max(np.where(low_and_left))\n # else:\n # low_ind = 0\n #\n # if max(low_and_right):\n # high_ind = np.min(np.where(low_and_right))\n # else:\n # high_ind = len(y)-1\n\n for i in range(max_ind, -1, -1):\n if below_threshold[i]:\n break\n if i == 0:\n low_ind = i\n else:\n low_ind = i + 1\n\n for i in range(max_ind, len(y)):\n if below_threshold[i]:\n break\n if i == len(y) - 1:\n high_ind = i\n else:\n high_ind = i - 1\n\n # print(y)\n # print('threshold: ' + str(.5*np.max(y)))\n # print(max_ind)\n # print(low_ind)\n # print(high_ind)\n # print('*******')\n #\n return offsets[high_ind] - offsets[low_ind]\n\n\nif False:\n # plot selectivity vs. 
position tolerance\n model = load_net()\n image_files = get_image_file_list('./images/positions/banana', 'png', with_path=True)\n im = preprocess(image_files)\n out = model.predict(im)\n\n\n with open('activity-fraction.pkl', 'rb') as file:\n (ind, selectivity) = pickle.load(file)\n\n # n = 674\n n = len(ind)\n print(n)\n\n object_responses = out[:,ind]\n plt.plot(offsets, object_responses)\n plt.show()\n\n # maxima = np.max(out, axis=0)\n # ind = (-maxima).argsort()[:n]\n # smoothed = smooth(object_responses, ind)\n\n def get_sd(x, y):\n x = np.array(x, dtype=float)\n mean = sum(x*y)/n\n sigma = (sum(y*(x-mean)**2)/n)**.5\n return sigma\n\n\n widths = np.zeros(n)\n good_selectivity = []\n good_widths = []\n for i in range(n):\n widths[i] = get_sd(offsets, object_responses[:,i])\n if np.mean(object_responses[:,i]) > .001:\n good_selectivity.append(selectivity[i])\n good_widths.append(widths[i])\n # if widths[i] < 1:\n # print(object_responses[:,i])\n\n with open('selectivity-vs-pos-tolerance.pkl', 'wb') as file:\n pickle.dump((good_selectivity, good_widths), file)\n\n print(np.corrcoef(good_selectivity, good_widths))\n plt.scatter(good_selectivity, good_widths)\n plt.show()\n\n\nif True:\n # alexnet 0: mean width: 146.208333333 std centres: 3.49089969478\n # alexnet 1: mean width: 138.875 std centres: 5.96841285709\n # alexnet 2: mean width: 112.583333333 std centres: 23.4025005388\n\n # vgg 0: mean width: 150.0 std centres: 1.12932654355\n # vgg 1: mean width: 150.0 std centres: 1.422815326\n # vgg 2: mean width: 141.916666667 std centres: 11.2126510706\n\n data = np.loadtxt(open(\"../data/op-de-beeck-6.csv\",\"rb\"),delimiter=\",\")\n x = data[:,0]\n\n it_std_rf_centre = np.std(x)\n it_mean_rf_size = 10.3 # from Op De Beeck\n\n # # strongest 30 responses ...\n # alexnet_std_rf_centre = np.array([23.4025005388, 5.96841285709, 3.49089969478])\n # alexnet_mean_rf_size = np.array([112.583333333, 138.875, 146.208333333])\n # vgg_std_rf_centre = np.array([11.2126510706, 1.422815326, 1.12932654355])\n # vgg_mean_rf_size = np.array([141.916666667, 150.0, 150.0])\n\n # first 30 strong responses ...\n alexnet_std_rf_centre = np.array([30.2666393886, 14.4771892017, 5.46262378919])\n alexnet_mean_rf_size = np.array([62.5833333333, 108.625, 140.333333333])\n vgg_std_rf_centre = np.array([22.2973512678, 3.0048651618, 2.26856624582])\n vgg_mean_rf_size = np.array([97.5, 147.916666667, 149.625])\n\n\n layers = [-2, -1, 0]\n plt.figure(figsize=(5,3.5))\n plt.plot(layers, alexnet_std_rf_centre / alexnet_mean_rf_size, 'o-')\n plt.plot(layers, vgg_std_rf_centre / vgg_mean_rf_size, 's-')\n plt.plot(layers, it_std_rf_centre/it_mean_rf_size*np.array([1, 1, 1]), 'k--')\n plt.xlabel('Distance from output (layers)', fontsize=16)\n plt.ylabel('STD of centers / mean width', fontsize=16)\n plt.xticks([-2,-1,0])\n plt.tight_layout()\n plt.savefig('../figures/position-variability.eps')\n plt.show()\n\n\nif False:\n # plot tuning curve examples\n\n remove_level = 0\n # model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_level)\n # use_vgg = False\n model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_level)\n use_vgg = True\n\n out = []\n image_files = get_image_file_list('./images/positions/staple', 'png', with_path=True)\n im = preprocess(image_files, use_vgg=use_vgg)\n out.append(model.predict(im))\n image_files = get_image_file_list('./images/positions/shoe', 'png', with_path=True)\n im = preprocess(image_files, use_vgg=use_vgg)\n 
out.append(model.predict(im))\n image_files = get_image_file_list('./images/positions/corolla', 'png', with_path=True)\n im = preprocess(image_files, use_vgg=use_vgg)\n out.append(model.predict(im))\n image_files = get_image_file_list('./images/positions/banana', 'png', with_path=True)\n im = preprocess(image_files, use_vgg=use_vgg)\n out.append(model.predict(im))\n out = np.array(out)\n # print(out.shape)\n\n # plot example tuning curves\n n = 30\n labels = ('staple', 'shoe', 'car', 'banana')\n plt.figure(figsize=(6,6))\n centres = []\n widths = []\n for i in range(4):\n object_responses = np.squeeze(out[i,:,:])\n # print(object_responses.shape)\n maxima = np.max(object_responses, axis=0)\n\n # ind = (-maxima).argsort()[:n]\n\n print('using first large n')\n ind = []\n j = 0\n while len(ind) < n:\n if maxima[j] > 2:\n ind.append(j)\n j = j + 1\n\n smoothed = smooth(object_responses, ind)\n\n for j in range(smoothed.shape[1]):\n centres.append(get_centre_of_mass(smoothed[:,j]))\n widths.append(get_width(smoothed[:,j]))\n\n # plt.plot(offsets, object_responses[:,ind])\n plt.subplot(2,2,i+1)\n\n if i >= 2:\n plt.xlabel('Offset (pixels)', fontsize=16)\n if i == 0 | i == 3:\n plt.ylabel('Response', fontsize=16)\n\n plt.title(labels[i], fontsize=16)\n\n plt.plot(offsets, smoothed)\n plt.xticks([-75,-25,25,75])\n plt.tight_layout()\n\n print(centres)\n print(widths)\n print('mean width: ' + str(np.mean(widths)) + ' std centres: ' + str(np.std(centres)))\n\n net = 'vgg16' if use_vgg else 'alexnet'\n plt.savefig('../figures/position-tuning-' + net + '-' + str(remove_level) + '.eps')\n\n plt.show()\n\n\ndef correlations(out):\n cc = np.corrcoef(out.T)\n result = []\n for i in range(cc.shape[0]):\n for j in range(i+1,cc.shape[1]):\n result.append(cc[i][j])\n return result\n\n\ndef invariant(out):\n return np.mean(correlations(out)) > .5\n\n\ndef clear_preference(out):\n # one size is clearly preferred in that it elicits a stronger response for each shape\n max_ind = np.argmax(out, axis=1)\n votes = np.zeros((5))\n for m in max_ind:\n votes[m] = votes[m] + 1\n\n return np.max(votes) >= 5\n # return np.max(np.abs(np.diff(max_ind))) == 0\n\n\nif False:\n # plot Schwartz et al. 
(1983) example\n U = np.loadtxt(open('../data/U.csv', 'rb'), delimiter=',')\n L = np.loadtxt(open('../data/L.csv', 'rb'), delimiter=',')\n C = np.loadtxt(open('../data/C.csv', 'rb'), delimiter=',')\n I = np.loadtxt(open('../data/I.csv', 'rb'), delimiter=',')\n F = np.loadtxt(open('../data/F.csv', 'rb'), delimiter=',')\n o = np.zeros((6,5))\n o[:,0] = U[:,1]\n o[:,1] = L[:,1]\n o[:,2] = C[:,1]\n o[:,3] = I[:,1]\n o[:,4] = F[:,1]\n\n plt.figure(figsize=(4,3.5))\n plt.plot(range(1,7), o)\n plt.tight_layout()\n plt.xlabel('Stimulus #', fontsize=18)\n plt.ylabel('Response (spikes/s)', fontsize=18)\n plt.tight_layout()\n plt.savefig('../figures/position-schwartz-example.eps')\n plt.show()\n\n print('Schwartz correlation ' + str(np.mean(correlations(o))))\n\n\nif False:\n # plot mean correlations and fraction with clear preference\n layers = [-2, -1, 0]\n\n plt.figure(figsize=(4,3.5))\n alexnet_correlations = [0.350468586188, 0.603050738337, 0.813774571373]\n vgg_correlations = [0.578857429221, 0.8000155323, 0.928289856194]\n schwartz_correlation = 0.791299618127\n plt.plot(layers, alexnet_correlations)\n plt.plot(layers, vgg_correlations)\n plt.plot(layers, schwartz_correlation*np.array([1, 1, 1]), 'k--')\n plt.xlabel('Distance from output (layers)', fontsize=16)\n plt.ylabel('Mean correlation', fontsize=16)\n plt.xticks([-2,-1,0])\n plt.ylim([0, 1])\n plt.tight_layout()\n plt.savefig('../figures/position-correlations.eps')\n plt.show()\n\n plt.figure(figsize=(4,3.5))\n alexnet_fractions = [0.21875, 0.125, 0.046875]\n vgg_fractions = [0.078125, 0.109375, 0.0]\n plt.plot(layers, alexnet_fractions)\n plt.plot(layers, vgg_fractions)\n plt.xlabel('Distance from output (layers)', fontsize=16)\n plt.ylabel('Fraction with preference', fontsize=16)\n plt.xticks([-2,-1,0])\n plt.ylim([0, 1])\n plt.tight_layout()\n plt.savefig('../figures/position-preferences.eps')\n plt.show()\n\n\nif False:\n use_vgg = True\n remove_level = 0\n if use_vgg:\n model = load_vgg(weights_path='../weights/vgg16_weights.h5', remove_level=remove_level)\n else:\n model = load_net(weights_path='../weights/alexnet_weights.h5', remove_level=remove_level)\n\n\n out = []\n stimuli = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6']\n for stimulus in stimuli:\n image_files = get_image_file_list('./images/positions/'+stimulus, 'png', with_path=True)\n im = preprocess(image_files, use_vgg=use_vgg)\n out.append(model.predict(im))\n out = np.array(out)\n print(out.shape)\n\n # plot invariance with Schwartz stimuli\n i = 0\n c = 0\n n_invariant = 0\n n_clear = 0\n mean_correlations = []\n while c < 64:\n plt.subplot(8,8,c+1)\n o = out[:,:,i]\n if np.max(o) > 1:\n plt.plot(o)\n yl = plt.gca().get_ylim()\n\n if invariant(o):\n n_invariant = n_invariant + 1\n plt.text(4.3, yl[0] + (yl[1]-yl[0])*.8, 'c', fontsize=14)\n\n if clear_preference(o):\n n_clear = n_clear + 1\n plt.text(.1, yl[0] + (yl[1]-yl[0])*.8, 'p', fontsize=14)\n\n mean_correlations.append(np.mean(correlations(o)))\n\n plt.xticks([])\n plt.yticks([])\n c = c + 1\n i = i + 1\n\n print(mean_correlations)\n print('fraction with preference ' + str(float(n_clear)/64.))\n print('mean correlation ' + str(np.nanmean(mean_correlations)))\n\n plt.tight_layout()\n network_name = 'vgg' if use_vgg else 'alexnet'\n plt.savefig('../figures/position-invariance-schwartz-' + network_name + '-' + str(remove_level) + '.eps')\n plt.show()\n\n\n" ]
[ [ "numpy.linspace", "numpy.squeeze", "matplotlib.pyplot.plot", "numpy.max", "numpy.mean", "numpy.nanmean", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.std", "numpy.argmax", "matplotlib.pyplot.subplot", "matplotlib.pyplot.text", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "numpy.corrcoef", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.scatter", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks" ] ]
FlyEgle/retinaface_pytorch
[ "79628fc5837b888b7f3e19c97a5ceabe860b0344" ]
[ "models/retinaface.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torchvision.models.detection.backbone_utils as backbone_utils\r\nimport torchvision.models._utils as _utils\r\nimport torch.nn.functional as F\r\nfrom collections import OrderedDict\r\n\r\nfrom models.net import MobileNetV1 as MobileNetV1\r\nfrom models.net import FPN as FPN\r\nfrom models.net import SSH as SSH\r\n\r\n\r\n\r\nclass ClassHead(nn.Module):\r\n def __init__(self,inchannels=512, num_anchors=3):\r\n super(ClassHead,self).__init__()\r\n self.num_anchors = num_anchors\r\n self.conv1x1 = nn.Conv2d(inchannels,self.num_anchors*2,kernel_size=(1,1),stride=1,padding=0)\r\n\r\n def forward(self,x):\r\n out = self.conv1x1(x)\r\n out = out.permute(0,2,3,1).contiguous()\r\n \r\n return out.view(out.shape[0], -1, 2)\r\n\r\nclass BboxHead(nn.Module):\r\n def __init__(self,inchannels=512,num_anchors=3):\r\n super(BboxHead,self).__init__()\r\n self.conv1x1 = nn.Conv2d(inchannels,num_anchors*4,kernel_size=(1,1),stride=1,padding=0)\r\n\r\n def forward(self,x):\r\n out = self.conv1x1(x)\r\n out = out.permute(0,2,3,1).contiguous()\r\n\r\n return out.view(out.shape[0], -1, 4)\r\n\r\nclass LandmarkHead(nn.Module):\r\n def __init__(self,inchannels=512,num_anchors=3):\r\n super(LandmarkHead,self).__init__()\r\n self.conv1x1 = nn.Conv2d(inchannels,num_anchors*10,kernel_size=(1,1),stride=1,padding=0)\r\n\r\n def forward(self,x):\r\n out = self.conv1x1(x)\r\n out = out.permute(0,2,3,1).contiguous()\r\n\r\n return out.view(out.shape[0], -1, 10)\r\n\r\nclass RetinaFace(nn.Module):\r\n def __init__(self, cfg = None, phase = 'train'):\r\n \"\"\"\r\n :param cfg: Network related settings.\r\n :param phase: train or test.\r\n \"\"\"\r\n super(RetinaFace,self).__init__()\r\n self.phase = phase\r\n backbone = None\r\n if cfg['name'] == 'mobilenet0.25':\r\n backbone = MobileNetV1()\r\n if cfg['pretrain']:\r\n checkpoint = torch.load(\"/data/remote/github_code/face_detection/Pytorch_Retinaface/weights/mobilenetV1X0.25_pretrain.tar\", map_location=torch.device('cpu'))\r\n from collections import OrderedDict\r\n new_state_dict = OrderedDict()\r\n for k, v in checkpoint['state_dict'].items():\r\n name = k[7:] # remove module.\r\n new_state_dict[name] = v\r\n # load params\r\n backbone.load_state_dict(new_state_dict)\r\n elif cfg['name'] == 'Resnet50':\r\n import torchvision.models as models\r\n backbone = models.resnet50(pretrained=cfg['pretrain'])\r\n\r\n self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])\r\n in_channels_stage2 = cfg['in_channel']\r\n in_channels_list = [\r\n in_channels_stage2 * 2,\r\n in_channels_stage2 * 4,\r\n in_channels_stage2 * 8,\r\n ]\r\n out_channels = cfg['out_channel']\r\n self.fpn = FPN(in_channels_list,out_channels)\r\n self.ssh1 = SSH(out_channels, out_channels)\r\n self.ssh2 = SSH(out_channels, out_channels)\r\n self.ssh3 = SSH(out_channels, out_channels)\r\n\r\n self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])\r\n self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])\r\n self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])\r\n\r\n def _make_class_head(self,fpn_num=3,inchannels=64,anchor_num=2):\r\n classhead = nn.ModuleList()\r\n for i in range(fpn_num):\r\n classhead.append(ClassHead(inchannels,anchor_num))\r\n return classhead\r\n \r\n def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2):\r\n bboxhead = nn.ModuleList()\r\n for i in range(fpn_num):\r\n bboxhead.append(BboxHead(inchannels,anchor_num))\r\n return 
bboxhead\r\n\r\n def _make_landmark_head(self,fpn_num=3,inchannels=64,anchor_num=2):\r\n landmarkhead = nn.ModuleList()\r\n for i in range(fpn_num):\r\n landmarkhead.append(LandmarkHead(inchannels,anchor_num))\r\n return landmarkhead\r\n\r\n def forward(self,inputs):\r\n out = self.body(inputs)\r\n\r\n # FPN\r\n fpn = self.fpn(out)\r\n\r\n # SSH\r\n feature1 = self.ssh1(fpn[0])\r\n feature2 = self.ssh2(fpn[1])\r\n feature3 = self.ssh3(fpn[2])\r\n features = [feature1, feature2, feature3]\r\n\r\n bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)\r\n classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1)\r\n ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)\r\n\r\n if self.phase == 'train':\r\n output = (bbox_regressions, classifications, ldm_regressions)\r\n else:\r\n output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)\r\n return output" ]
[ [ "torch.device", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.functional.softmax" ] ]
aabiddanda/gdc
[ "5e999ba5363a45a9a4e8f2edec03e5082a3cdab5" ]
[ "pyEigenstrat.py" ]
[ "# Class for reading packed and unpacked Eigenstrat/Ancestrymap format files.\n# packedancestrymap format description by Nick Patterson below:\n#\n################################################################################\n#\n# usage:\n#\n# files named root.{ind,snp,geno} either packed or unpacked\n#\n# > data=pyEigenstrat.load(\"root\", [pops=[], inds=[], snps=[]]) \n# to load the data - with optionally including only certain populations\n# individuals or snps\n#\n# > genotypes=data.geno()\n# to load all the data or iterate line by line (snp by snp) without loading\n# the whole file into memory:\n# > for snp in data: print(snp)\n# \n################################################################################\n# packedancestrymap format\n#\n#\n# nind # individuals (samples) \n# nsnp # snps \n#\n# 1. \n# record len (rlen) \n#\n# Here is a C-fragment \n# y = (double) (nind * 2) / (8 * (double) sizeof (char)) ;\n# rlen = lround(ceil(y)) ;\n# rlen = MAX(rlen, 48) ;\n#\n# The genotype file will contain 1 header record of rlen bytes and then \n# nsnp records of genotype data. \n#\n# a) Header record \n#\n# sprintf(hdr, \"GENO %7d %7d %x %x\", nind, nsnp, ihash, shash) \n# wwhere ihash and shash are hash values whose calculation we don't describe hhere. \n#\n# b) data records \n# genotype values are packed left to right across the record. \n# Order \n# byte 1: (first sample, second sample, ...\n# byte 2: (fourth sample ... \n#\n# Values 00 = 0 \n# 01 = 1\n# 10 = 2 \n# 11 = 3\n# And the last byte is padded with 11 if necessary\n#\n# Nick 7/23\n################################################################################\n# imports\n\nfrom __future__ import division\nimport numpy as np \n\n################################################################################\n\n# datatype definitions\ndt_snp1=np.dtype([(\"ID\", np.str_, 16), (\"CHR\", np.str_, 2), (\"POS\", np.int32)])\ndt_snp2=np.dtype([(\"ID\", np.str_, 16), (\"CHR\", np.str_, 2), (\"POS\", np.int32), \n (\"REF\", np.str_, 1), (\"ALT\", np.str_, 1)])\ndt_ind=np.dtype([(\"IND\", np.str_, 32), (\"POP\", np.str_, 32)])\n\n###########################################################################\n\ndef load(file_root, pops=None, inds=None, exclude_inds=None, snps=None):\n \"\"\"\n Investigate the geno file, and return either a packed\n or unpacked eigenstrat object as appropriate\n \"\"\"\n geno_file=open(file_root+\".geno\", \"rb\")\n head=geno_file.read(4)\n geno_file.close()\n if head == b\"GENO\":\n return packed_data(file_root, pops, inds, exclude_inds, snps)\n else:\n return unpacked_data(file_root, pops, inds, exclude_inds, snps)\n\n###########################################################################\n\nclass data():\n \"\"\"\n Base class. \n \"\"\"\n\n def __init__(self, file_root, pops=None, inds=None, exclude_inds=None, snps=None):\n \"\"\"\n We expect to see files file_root.{snp,ind,geno}. the .geno\n file might be either packed or unpacked. \n \"\"\"\n\n snp,snp_include=load_snp_file(file_root, snps)\n ind,ind_include=load_ind_file(file_root, pops, inds, exclude_inds)\n\n # Snp and ind data\n self.snp=snp\n self.ind=ind\n self._file_root=file_root\n self._snp_include=snp_include\n self._ind_include=ind_include\n\n # Genotypes might be set later, geno file used for iterator. \n self._geno=None\n self._geno_file=self.open_geno_file(file_root)\n # Which snp are we on. 
\n self._isnp=0\n \n def __iter__(self):\n return self\n\n # Interface follows: \n \n def open_geno_file(self, file_root):\n \"\"\"\n Open the genotype file. \n \"\"\"\n raise NotImplementedError(\"Don't call the base class\")\n\n def geno(self):\n \"\"\"\n If this is called, load the whole genotype matrix, and return it\n buffer it in case we want to load it again. \n \"\"\"\n raise NotImplementedError(\"Don't call the base class\")\n\n def next(self):\n raise NotImplementedError(\"Don't call the base class\")\n\n\n###########################################################################\n# END CLASS\n\nclass unpacked_data(data):\n \"\"\"\n Read unpacked data \n \"\"\"\n \n def open_geno_file(self, file_root):\n \"\"\"\n Open the genotype file. \n \"\"\"\n return open(file_root+\".geno\", \"r\")\n\n def geno(self):\n \"\"\"\n If this is called, load the whole genotype matrix, and return it\n buffer it in case we want to load it again. \n \"\"\"\n if self._geno is not None:\n return self._geno\n\n geno=np.genfromtxt(self._file_root+\".geno\", dtype='i1', delimiter=1, \n usecols=np.where(self._ind_include)[0])\n\n # If we only loaded one individual, don't drop the second dimension.\n if len(geno.shape)==1: geno.shape=(geno.shape[0],1)\n \n geno=geno[self._snp_include,:]\n self._geno=geno\n return geno \n \n # This is the key here ..\n def __next__(self):\n while True:\n line = self._geno_file.readline()\n self._isnp += 1\n if self._snp_include[self._isnp-1]:\n break\n \n gt = np.array(list(line[:-1]), dtype='i1')\n gt = gt[self._ind_include]\n return gt\n\n###########################################################################\n# END CLASS\n\nclass packed_data(data):\n \"\"\"\n Read packed data \n \"\"\"\n \n def open_geno_file(self, file_root):\n \"\"\"\n Open the genotype file (in binary mode). Read the header. \n \"\"\"\n geno_file=open(file_root+\".geno\", \"rb\")\n header=geno_file.read(20) #Ignoring hashes\n if header.split()[0] != b\"GENO\":\n raise Exception(\"This does not look like a packedancestrymap file\")\n nind,nsnp=[int(x) for x in header.split()[1:3]] \n\n self._nind=nind\n self._nsnp=nsnp\n self._rlen=max(48,int(np.ceil(nind*2/8))) #assuming sizeof(char)=1 here\n geno_file.seek(self._rlen) #set pointer to start of genotypes\n return geno_file\n \n def geno(self):\n \"\"\"\n If this is called, load the whole genotype matrix, and return it\n buffer it in case we want to load it again. 
\n \"\"\"\n if self._geno is not None:\n return self._geno\n\n geno=np.fromfile(self._file_root+\".geno\", dtype='uint8')[self._rlen:] #without header\n geno.shape=(self._nsnp, self._rlen)\n geno=np.unpackbits(geno, axis=1)[:,:(2*self._nind)]\n geno=2*geno[:,::2]+geno[:,1::2]\n geno=geno[:,self._ind_include]\n geno[geno==3]=9 #set missing values\n \n # If we only loaded one individual, don't drop the second dimension.\n if len(geno.shape)==1: geno.shape=(geno.shape[0],1)\n \n geno=geno[self._snp_include,:]\n self._geno=geno\n return geno \n\n def __next__(self):\n\n while True:\n if self._isnp >= self._nsnp:\n raise StopIteration()\n record=self._geno_file.read(self._rlen)\n self._isnp+=1\n if self._snp_include[self._isnp-1]:\n break\n\n gt_bits=np.unpackbits(np.fromstring(record, dtype='uint8'))\n gt=2*gt_bits[::2]+gt_bits[1::2]\n gt=gt[:self._nind][self._ind_include]\n gt[gt==3]=9 #set missing values\n \n return gt\n\n###########################################################################\n# END CLASS\n\ndef load_snp_file(file_root, snps=None):\n \"\"\"\n Load a .snp file into the right format. \n \"\"\"\n snp_file=open(file_root+\".snp\", \"r\")\n line=snp_file.readline()\n bits=line.split()\n snpdt=dt_snp1 # does the snp file have the alleles in?\n snpcol=(0,1,3)\n if len(bits) not in [4,6]:\n raise Exception(\"SNP file should have either 4 or 6 columns\")\n elif len(bits)==6:\n snpdt=dt_snp2\n snpcol=(0,1,3,4,5)\n\n snp_file.seek(0)\n snp=np.genfromtxt(snp_file, dtype=snpdt, usecols=snpcol)\n snp_file.close()\n\n include=np.ones(len(np.atleast_1d(snp)), dtype=bool)\n if snps is not None:\n include=np.in1d(snp[\"ID\"], snps)\n snp=snp[include]\n \n return snp,include\n\n###########################################################################\n\ndef load_ind_file(file_root, pops=None, inds=None, exclude_inds=None):\n \"\"\"\n Load a .ind file, restricting to the union of specified\n individuals and individuals in the specified populations. \n \"\"\"\n ind=np.genfromtxt(file_root+\".ind\", dtype=dt_ind, usecols=(0,2)) # ignore sex\n\n include=np.ones(len(ind), dtype=bool)\n if pops or inds or exclude_inds:\n include=np.zeros(len(ind), dtype=bool)\n if pops:\n include=np.in1d(ind[\"POP\"], pops)\n if inds:\n include=np.logical_or(include, np.in1d(ind[\"IND\"], inds))\n if exclude_inds:\n include=np.logical_and(include, ~np.in1d(ind[\"IND\"], exclude_inds))\n \n ind=ind[include]\n return ind,include\n\n###########################################################################\n\n" ]
[ [ "numpy.fromfile", "numpy.in1d", "numpy.dtype", "numpy.genfromtxt", "numpy.atleast_1d", "numpy.ceil", "numpy.fromstring", "numpy.where", "numpy.unpackbits" ] ]
CVUT-FS-12110/Python-for-scientific-computation-and-control
[ "b2ebae9eb3ded5c28238e39a0cac9e23eb1d25d1" ]
[ "courses/E375004/control/utils.py" ]
[ "\"\"\"\nUtility functions for pygame simulator ('simulator.py')\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef solver(x, v_x, c, m, f, delta_t, x_max, x_img_size, padding = 0):\n \"\"\"\n Euler solver of dynamical system:\n\n m*x\" + c*x' = f\n\n state space substitution:\n\n x1 = x\n x2 = v_x\n\n x1(k+1) = x1(k) + x2(k) * delta_t\n x2(k+1) = x2(k) + (-c/m * x2(k) + f(k)/m) * delta_t\n\n :param x: x position of simulated system [px]\n :param v_x: velocity in x direction of simulated system [px/s]\n :param c: damping parameter [kg/s]\n :param m: weight [kg]\n :param f: external force [N]\n :param delta_t: time step of the simulation [s]\n :param x_max: x boundary of pygame screen [px]\n :param x_img_size: image width [px]\n :param padding: pygame screen padding [px]\n :return: new x position and new x velocity\n \"\"\"\n\n x_new = x + v_x*delta_t # new x position calculation\n v_new = v_x - (c / m) * v_x * delta_t + (f / m) * delta_t # new x velocity calculation\n\n # check boundaries. If the system is at the boundary then: x(k+1) = x(k) and new velocity is 0\n if x_new + x_img_size//2 +padding > x_max:\n return x, 0\n elif x_new + x_img_size//2 < 0:\n return x, 0\n else:\n return x_new, v_new\n\ndef controller():\n # TODO your own controller\n return 0\n\ndef update_line(plot, new_data, time_step):\n plot.set_xdata(np.append(plot.get_xdata(), new_data[0]))\n plot.set_ydata(np.append(plot.get_ydata(), new_data[1]))\n plt.draw() # draw new data\n plt.pause(time_step) # update graph every 0.5 second\n\n\n" ]
[ [ "matplotlib.pyplot.pause", "matplotlib.pyplot.draw" ] ]
smbct/LOLH
[ "a9b608b500c83731db2c7dcb70e08cf9a2a94fe0" ]
[ "examples/multi_rules_old/show_solution.py" ]
[ "#!/usr/bin/python\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib import colors as mcolors\n\nimport sys\nsys.path.append('../../python')\n\nfrom instance import Instance\nfrom solver import Solver\nimport visualizer\nimport histogram\n\n\nprint('\\n')\nprint('Classification of the NK cells')\nprint('\\n')\n\n# read the discrete matrix\nfilename = '../../dataset/Imagine/discrete_matrix.csv'\ndf_discrete = pd.read_csv(filename, index_col=0)\n# print(df_discrete.head())\n\n\n# read the normalized matrix\n# filename = '../../dataset/Imagine/normalized_matrix.csv'\n# df_normalized = pd.read_csv(filename, index_col=0).T\n# print(df_normalized.head())\n\n# read the UMAP coordinates\nfilename = '../../dataset/Imagine/umap_coordinates.csv'\ndf_umap = pd.read_csv(filename, index_col = 0)\n\n# read the seurat cell types\ndf_celltypes = pd.read_csv('../../dataset/Imagine/cell_types.csv', index_col = 0)\ndf_celltypes.rename(columns={'cellType_final': 'Label'}, inplace=True)\n# print(df_celltypes.head())\n\ndf_macrotypes = pd.read_csv('../../dataset/Imagine/cell_types_macro.csv', index_col = 0)\ndf_macrotypes.rename(columns={'cellType_macro': 'Label'}, inplace=True)\n\n# initialization of the classification instance: classification of the NK cells\ncelltype = 'NK'\ncelltype = 'T'\ninstance = Instance.create_cluster_instance(df_discrete.copy(deep=False), df_macrotypes, celltype)\n\nprint('Classification of the NK cells')\nprint('- ', instance.n_positives(), ' positive examples')\nprint('- ', instance.n_negatives(), ' negative examples')\nprint('\\n')\n\n# load the solution\nfile = open('T_sol.txt', 'r')\ncontent = file.read().split(' ')\ncontent = content[1:]\nsol_values = [int(elt) for elt in content]\nprint(sol_values)\n\nsolver = Solver(instance)\n\n# create a rule from a threshold\nsorted_atoms, scores = solver.select_best_atoms_fast()\nthreshold = 0.5\n\npred_col = ['blue', 'green', 'orange']\ncolors = {}\nfor elt in df_umap.index:\n colors[elt] = 'red'\nfor ind in range(len(sol_values)):\n val = sol_values[ind]\n colors[instance._pos_samples[ind]] = pred_col[val]\n\nfig, ax = plt.subplots()\nax.scatter(df_umap['UMAP_1'][:], df_umap['UMAP_2'][:], marker='o', s=1, c=[colors[index] for index in df_umap.index])\nax.set_aspect((ax.get_xlim()[1] - ax.get_xlim()[0])/(ax.get_ylim()[1] - ax.get_ylim()[0]))\n\nplt.show()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
stjordanis/quantumflow
[ "bf965f0ca70cd69b387f9ca8407ab38da955e925" ]
[ "quantumflow/decompositions.py" ]
[ "\n# Copyright 2016-2018, Rigetti Computing\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\nQuantumFlow Gate Decompositions\n\"\"\"\n\nfrom typing import Sequence, Tuple\nimport itertools\n\nimport numpy as np\nfrom numpy import pi\n\nfrom .qubits import asarray\nfrom .config import TOLERANCE\nfrom .gates import Gate\nfrom .measures import gates_close\nfrom .stdgates import RN, CANONICAL, TZ, TY\nfrom .circuits import Circuit\n\n__all__ = ['bloch_decomposition',\n 'zyz_decomposition',\n 'kronecker_decomposition',\n 'canonical_decomposition',\n 'canonical_coords']\n\n\ndef bloch_decomposition(gate: Gate) -> Circuit:\n \"\"\"\n Converts a 1-qubit gate into a RN gate, a 1-qubit rotation of angle theta\n about axis (nx, ny, nz) in the Bloch sphere.\n\n Returns:\n A Circuit containing a single RN gate\n \"\"\"\n if gate.qubit_nb != 1:\n raise ValueError('Expected 1-qubit gate')\n\n U = asarray(gate.asoperator())\n U /= np.linalg.det(U) ** (1/2)\n\n nx = - U[0, 1].imag\n ny = - U[0, 1].real\n nz = - U[0, 0].imag\n N = np.sqrt(nx**2 + ny**2 + nz**2)\n if N == 0: # Identity\n nx, ny, nz = 1, 1, 1\n else:\n nx /= N\n ny /= N\n nz /= N\n sin_halftheta = N\n cos_halftheta = U[0, 0].real\n theta = 2 * np.arctan2(sin_halftheta, cos_halftheta)\n\n # We return a Circuit (rather than just a gate) to keep the\n # interface of decomposition routines uniform.\n return Circuit([RN(theta, nx, ny, nz, *gate.qubits)])\n\n\n# DOCME TESTME\ndef zyz_decomposition(gate: Gate) -> Circuit:\n \"\"\"\n Returns the Euler Z-Y-Z decomposition of a local 1-qubit gate.\n \"\"\"\n if gate.qubit_nb != 1:\n raise ValueError('Expected 1-qubit gate')\n\n q, = gate.qubits\n\n U = asarray(gate.asoperator())\n U /= np.linalg.det(U) ** (1/2) # SU(2)\n\n if abs(U[0, 0]) > abs(U[1, 0]):\n theta1 = 2 * np.arccos(min(abs(U[0, 0]), 1))\n else:\n theta1 = 2 * np.arcsin(min(abs(U[1, 0]), 1))\n\n cos_halftheta1 = np.cos(theta1/2)\n if not np.isclose(cos_halftheta1, 0.0):\n phase = U[1, 1] / cos_halftheta1\n theta0_plus_theta2 = 2 * np.arctan2(np.imag(phase), np.real(phase))\n else:\n theta0_plus_theta2 = 0.0\n\n sin_halftheta1 = np.sin(theta1/2)\n if not np.isclose(sin_halftheta1, 0.0):\n phase = U[1, 0] / sin_halftheta1\n theta0_sub_theta2 = 2 * np.arctan2(np.imag(phase), np.real(phase))\n else:\n theta0_sub_theta2 = 0.0\n\n theta0 = (theta0_plus_theta2 + theta0_sub_theta2) / 2\n theta2 = (theta0_plus_theta2 - theta0_sub_theta2) / 2\n\n t0 = theta0/np.pi\n t1 = theta1/np.pi\n t2 = theta2/np.pi\n\n circ1 = Circuit()\n circ1 += TZ(t2, q)\n circ1 += TY(t1, q)\n circ1 += TZ(t0, q)\n\n return circ1\n\n\ndef kronecker_decomposition(gate: Gate) -> Circuit:\n \"\"\"\n Decompose a 2-qubit unitary composed of two 1-qubit local gates.\n\n Uses the \"Nearest Kronecker Product\" algorithm. 
Will give erratic\n results if the gate is not the direct product of two 1-qubit gates.\n \"\"\"\n # An alternative approach would be to take partial traces, but\n # this approach appears to be more robust.\n\n if gate.qubit_nb != 2:\n raise ValueError('Expected 2-qubit gate')\n\n U = asarray(gate.asoperator())\n rank = 2**gate.qubit_nb\n U /= np.linalg.det(U) ** (1/rank)\n\n R = np.stack([U[0:2, 0:2].reshape(4),\n U[0:2, 2:4].reshape(4),\n U[2:4, 0:2].reshape(4),\n U[2:4, 2:4].reshape(4)])\n u, s, vh = np.linalg.svd(R)\n v = vh.transpose()\n A = (np.sqrt(s[0]) * u[:, 0]).reshape(2, 2)\n B = (np.sqrt(s[0]) * v[:, 0]).reshape(2, 2)\n\n q0, q1 = gate.qubits\n g0 = Gate(A, qubits=[q0])\n g1 = Gate(B, qubits=[q1])\n\n if not gates_close(gate, Circuit([g0, g1]).asgate()):\n raise ValueError(\"Gate cannot be decomposed into two 1-qubit gates\")\n\n circ = Circuit()\n circ += zyz_decomposition(g0)\n circ += zyz_decomposition(g1)\n\n assert gates_close(gate, circ.asgate()) # Sanity check\n\n return circ\n\n\ndef canonical_coords(gate: Gate) -> Sequence[float]:\n \"\"\"Returns the canonical coordinates of a 2-qubit gate\"\"\"\n circ = canonical_decomposition(gate)\n gate = circ.elements[6] # type: ignore\n params = [gate.params[key] for key in ('tx', 'ty', 'tz')]\n return params\n\n\ndef canonical_decomposition(gate: Gate) -> Circuit:\n \"\"\"Decompose a 2-qubit gate by removing local 1-qubit gates to leave\n the non-local canonical two-qubit gate. [1]_ [2]_ [3]_ [4]_\n\n Returns: A Circuit of 5 gates: two initial 1-qubit gates; a CANONICAL\n gate, with coordinates in the Weyl chamber; two final 1-qubit gates\n\n The canonical coordinates can be found in circ.elements[2].params\n\n More or less follows the algorithm outlined in [2]_.\n\n .. [1] A geometric theory of non-local two-qubit operations, J. Zhang,\n J. Vala, K. B. Whaley, S. Sastry quant-ph/0291120\n .. [2] An analytical decomposition protocol for optimal implementation of\n two-qubit entangling gates. M. Blaauboer, R.L. de Visser,\n cond-mat/0609750\n .. [3] Metric structure of two-qubit gates, perfect entangles and quantum\n control, P. Watts, M. O'Conner, J. Vala, Entropy (2013)\n .. [4] Constructive Quantum Shannon Decomposition from Cartan Involutions\n B. Drury, P. Love, arXiv:0806.4015\n \"\"\"\n\n # Implementation note: The canonical decomposition is easy. Constraining\n # canonical coordinates to the Weyl chamber is easy. But doing the\n # canonical decomposition with the canonical gate in the Weyl chamber\n # proved to be surprisingly tricky.\n\n # Unitary transform to Magic Basis of Bell states\n Q = np.asarray([[1, 0, 0, 1j],\n [0, 1j, 1, 0],\n [0, 1j, -1, 0],\n [1, 0, 0, -1j]]) / np.sqrt(2)\n Q_H = Q.conj().T\n\n if gate.qubit_nb != 2:\n raise ValueError('Expected 2-qubit gate')\n\n U = asarray(gate.asoperator())\n rank = 2**gate.qubit_nb\n U /= np.linalg.det(U) ** (1/rank) # U is in SU(4) so det U = 1\n\n U_mb = Q_H @ U @ Q # Transform gate to Magic Basis [1, (eq. 17, 18)]\n M = U_mb.transpose() @ U_mb # Construct M matrix [1, (eq. 22)]\n\n # Diagonalize symmetric complex matrix\n eigvals, eigvecs = _eig_complex_symmetric(M)\n\n lambdas = np.sqrt(eigvals) # Eigenvalues of F\n # Lambdas only fixed up to a sign. 
So make sure det F = 1 as it should\n det_F = np.prod(lambdas)\n if det_F.real < 0:\n lambdas[0] *= -1\n\n coords, signs, perm = _constrain_to_weyl(lambdas)\n\n # Construct local and canonical gates in magic basis\n lambdas = (lambdas*signs)[perm]\n O2 = (np.diag(signs) @ eigvecs.transpose())[perm]\n F = np.diag(lambdas)\n O1 = U_mb @ O2.transpose() @ F.conj()\n\n # Sanity check: Make sure O1 and O2 are orthogonal\n assert np.allclose(np.eye(4), O2.transpose() @ O2) # Sanity check\n assert np.allclose(np.eye(4), O1.transpose() @ O1) # Sanity check\n\n # Sometimes O1 & O2 end up with det = -1, instead of +1 as they should.\n # We can commute a diagonal matrix through F to fix this up.\n neg = np.diag([-1, 1, 1, 1])\n if np.linalg.det(O2).real < 0:\n O2 = neg @ O2\n O1 = O1 @ neg\n\n # Transform gates back from magic basis\n K1 = Q @ O1 @ Q_H\n A = Q @ F @ Q_H\n K2 = Q @ O2 @ Q_H\n\n assert gates_close(Gate(U), Gate(K1 @ A @ K2)) # Sanity check\n canon = CANONICAL(coords[0], coords[1], coords[2], 0, 1)\n\n # Sanity check\n assert gates_close(Gate(A, qubits=gate.qubits), canon, tolerance=1e-4)\n\n # Decompose local gates into the two component 1-qubit gates\n gateK1 = Gate(K1, qubits=gate.qubits)\n circK1 = kronecker_decomposition(gateK1)\n assert gates_close(gateK1, circK1.asgate()) # Sanity check\n\n gateK2 = Gate(K2, qubits=gate.qubits)\n circK2 = kronecker_decomposition(gateK2)\n assert gates_close(gateK2, circK2.asgate()) # Sanity check\n\n # Build and return circuit\n circ = Circuit()\n circ += circK2\n circ += canon\n circ += circK1\n\n return circ\n\n\ndef _eig_complex_symmetric(M: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Diagonalize a complex symmetric matrix. The eigenvalues are\n complex, and the eigenvectors form an orthogonal matrix.\n\n Returns:\n eigenvalues, eigenvectors\n \"\"\"\n if not np.allclose(M, M.transpose()):\n raise np.linalg.LinAlgError('Not a symmetric matrix')\n\n # The matrix of eigenvectors should be orthogonal.\n # But the standard 'eig' method will fail to return an orthogonal\n # eigenvector matrix when the eigenvalues are degenerate. However,\n # both the real and\n # imaginary part of M must be symmetric with the same orthogonal\n # matrix of eigenvectors. But either the real or imaginary part could\n # vanish. So we use a randomized algorithm where we diagonalize a\n # random linear combination of real and imaginary parts to find the\n # eigenvectors, taking advantage of the 'eigh' subroutine for\n # diagonalizing symmetric matrices.\n # This can fail if we're very unlucky with our random coefficient, so we\n # give the algorithm a few chances to succeed.\n\n # Empirically, never seems to fail on randomly sampled complex\n # symmetric 4x4 matrices.\n # If failure rate is less than 1 in a million, then 16 rounds\n # will have overall failure rate less than 1 in a googol.\n # However, cannot (yet) guarantee that there aren't special cases\n # which have much higher failure rates.\n\n # GEC 2018\n\n max_attempts = 16\n for _ in range(max_attempts):\n c = np.random.uniform(0, 1)\n matrix = c * M.real + (1-c) * M.imag\n _, eigvecs = np.linalg.eigh(matrix)\n eigvecs = np.array(eigvecs, dtype=complex)\n eigvals = np.diag(eigvecs.transpose() @ M @ eigvecs)\n\n # Finish if we got a correct answer.\n reconstructed = eigvecs @ np.diag(eigvals) @ eigvecs.transpose()\n if np.allclose(M, reconstructed):\n return eigvals, eigvecs\n\n # Should never happen. 
Hopefully.\n raise np.linalg.LinAlgError(\n 'Cannot diagonalize complex symmetric matrix.') # pragma: no cover\n\n\ndef _lambdas_to_coords(lambdas: Sequence[float]) -> np.ndarray:\n # [2, eq.11], but using [1]s coordinates.\n l1, l2, _, l4 = lambdas\n c1 = np.real(1j * np.log(l1 * l2))\n c2 = np.real(1j * np.log(l2 * l4))\n c3 = np.real(1j * np.log(l1 * l4))\n coords = np.asarray((c1, c2, c3))/pi\n\n coords[np.abs(coords-1) < TOLERANCE] = -1\n if all(coords < 0):\n coords += 1\n\n # If we're close to the boundary, floating point errors can conspire\n # to make it seem that we're never on the inside\n # Fix: If near boundary, reset to boundary\n\n # Left\n if np.abs(coords[0] - coords[1]) < TOLERANCE:\n coords[1] = coords[0]\n\n # Front\n if np.abs(coords[1] - coords[2]) < TOLERANCE:\n coords[2] = coords[1]\n\n # Right\n if np.abs(coords[0]-coords[1]-1/2) < TOLERANCE:\n coords[1] = coords[0]-1/2\n\n # Base\n coords[np.abs(coords) < TOLERANCE] = 0\n\n return coords\n\n\ndef _constrain_to_weyl(lambdas: Sequence[float]) \\\n -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\n for permutation in itertools.permutations(range(4)):\n for signs in ([1, 1, 1, 1], [1, 1, -1, -1],\n [-1, 1, -1, 1], [1, -1, -1, 1]):\n signed_lambdas = lambdas * np.asarray(signs)\n perm = list(permutation)\n lambas_perm = signed_lambdas[perm]\n\n coords = _lambdas_to_coords(lambas_perm)\n\n if _in_weyl(*coords):\n return coords, np.asarray(signs), perm\n\n # Should never get here\n assert False # pragma: no cover\n return None, None, None # pragma: no cover\n\n\ndef _in_weyl(tx: float, ty: float, tz: float) -> bool:\n # Note 'tz>0' in second term. This takes care of symmetry across base\n # when tz==0\n return (1/2 >= tx >= ty >= tz >= 0) or (1/2 >= (1-tx) >= ty >= tz > 0)\n" ]
[ [ "numpy.diag", "numpy.imag", "numpy.sqrt", "numpy.asarray", "numpy.arctan2", "numpy.linalg.LinAlgError", "numpy.linalg.svd", "numpy.allclose", "numpy.eye", "numpy.sin", "numpy.linalg.det", "numpy.real", "numpy.isclose", "numpy.log", "numpy.linalg.eigh", "numpy.array", "numpy.abs", "numpy.cos", "numpy.prod", "numpy.random.uniform" ] ]
tzamalisp/tasks_opensource
[ "73dee9c506c388d6c5b8d65e0d8053320978ce81" ]
[ "tasks/utils/files_saving.py" ]
[ "import os\nimport matplotlib.pyplot as plt\n# plot pretty figures\nimport matplotlib as mpl\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n\ndef save_fig(saving_path, fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(saving_path, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.savefig", "matplotlib.rc" ] ]
HDNua/kwin
[ "33ce866c2b37faa1a5940354a0e5b3919e5eecc8" ]
[ "choi/object_search/video_detection.py" ]
[ "\"\"\"\n video recognizer\n\n Developer: HeeJun Choi, DoYoung Han\n Version: 0.6.5\n Release Date: 2017-01-09\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport tensorflow as tf\nimport sys\nfrom kwin import *\nimport time\nimport dataset\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n########################################################################\n# 프로그램을 초기화합니다.\n########################################################################\n# 변수를 선언합니다.\ntest_video_path = dataset.test_data_path(\"testing2.mp4\")\n\nsess = None # 세션에 대한 참조입니다.\nsoftmax_tensor = None # 세션의 그래프 구조에 대한 참조입니다.\n\ngrayscale = False # 프레임을 회색으로 변환하려면 grayscale을 True로 설정합니다.\n\nmoved = False # 프레임과 출력 창의 시작 위치를 지정하려면 값을 False로 둡니다.\nFRAME_X_ORIGIN = 0\nFRAME_Y_ORIGIN = 0\nBOARD_X_ORIGIN = FRAME_X_ORIGIN + 960\nBOARD_Y_ORIGIN = 0\n\nOUT_X_ORIGIN = 50 # x 시작점입니다.\nOUT_Y_ORIGIN = 50 # y 시작점입니다.\nOUT_LINE_SPACE = 50 # 출력의 줄 간격입니다.\nOUT_FONT_SIZE = 1.5 # 폰트 크기입니다\n\n# 출력 창을 위한 흰색 배경 이미지입니다.\nwhite = cv2.imread(\"white-640x480.jpg\")\nif white is None:\n print(\"cannot read white\")\n sys.exit()\n\n# 웹 캠을 엽니다.\n# video = cv2.VideoCapture(0)\nvideo = cv2.VideoCapture(test_video_path)\n\nframe_list = []\nframe_time = []\n\n#\ntrain_label_path = dataset.train_path(\"video_detection/train_labels.txt\")\ntrain_graph_path = dataset.train_path(\"video_detection/train_graph.pb\")\n\n# 레이블 파일을 가져오고(GFile) 모든 줄의 끝에 있는 캐리지 리턴을 제거합니다(rstrip).\n## label_lines = [line.rstrip() for line in tf.gfile.GFile(\"./train/train_labels.txt\")]\nlabel_lines = [line.rstrip() for line in tf.gfile.GFile(train_label_path)]\n\n# 그래프 파일로부터 그래프를 생성합니다.\n## with tf.gfile.FastGFile(\"./train/train_graph.pb\", 'rb') as f:\nwith tf.gfile.FastGFile(train_graph_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\n# 매 프레임을 세는 카운트 및 1프레임 당 시간은 0.041666666666666666667초 ( 1/24초 )\nprint (label_lines)\nobj = input(\"찾고자 하는 물체 입력 : \")\ncount = 1\ntime = 0.04166666666666666666666666666667\n########################################################################\n# 파일이 열려있는 동안 작업을 진행합니다.\n# !!!!! 
IMPORTANT !!!!\n# Python에서 None과의 비교를 위해 != 연산자를 사용하지 마십시오.\nwhile video is not None and video.isOpened():\n # 영상 파일로부터 데이터를 가져옵니다.\n ret, frame = video.read()\n count = count + 1\n if count % 4 != 0:\n continue\n\n if frame is None:\n break\n\n # 화면에 출력할 텍스트의 리스트를 정의합니다.\n text_list = []\n\n # 프레임을 회색으로 변환하려면 grayscale을 True로 설정합니다.\n if grayscale is True:\n # 프레임을 흑백으로 변환합니다.\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # cv2.waitKey(int delay) -> https://goo.gl/lhjVHH\n # delay 밀리초마다 키 이벤트를 대기합니다.\n # ch = cv2.waitKey(1)\n #\n # # Q를 누르면 종료합니다.\n # if ch == 113:\n # break\n\n # Numpy 행렬 형태로 프레임을 변환합니다.\n # 0:3 -> RGB 값을 유지합니다.\n pil_img = np.array(frame)[:, :, 0:3]\n\n # softmax_tensor를 단 한 번만 초기화합니다.\n if softmax_tensor is None:\n sess = tf.Session()\n softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\n\n # 1차 예측의 확률을 획득합니다.\n predictions = sess.run(softmax_tensor, {'DecodeJpeg:0': pil_img})\n\n # 신뢰도 순서로 1차 예측의 레이블을 보여주기 위해 정렬합니다.\n # (Sort to show labels of first prediction in order of confidence)\n # https://docs.python.org/2.3/whatsnew/section-slices.html\n # * [start:end:step], start(in)부터 end(ex)까지 step 단위로 가져옵니다.\n # > [::-1] -> 모든 범위의 원소를 거꾸로 정렬합니다.\n top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]\n\n # 정렬된 레이블을 출력합니다.\n for node_id in top_k:\n human_string = label_lines[node_id]\n score = predictions[0][node_id]\n\n # 출력을 보관합니다.\n text_list.append('%s (score = %.5f)' % (human_string, score))\n if human_string == obj and score > 0.8:\n frame_list.append(frame)\n frame_time.append(str(time*count))\n print (str(obj) + '가 나타난 시간 : ' + str(time * count) + '초')\n # 보드를 생성하고 결과를 출력합니다.\n board = white.copy()\n for i in range(len(text_list)):\n cv2.putText(board, text_list[i], (OUT_X_ORIGIN, OUT_LINE_SPACE * i + OUT_Y_ORIGIN),\n cv2.FONT_HERSHEY_SIMPLEX, OUT_FONT_SIZE, 255)\n\n # # 프레임 및 분석 결과를 출력합니다.\n # cv2.imshow('frame', frame)\n # cv2.imshow('board', board)\n #\n # # 프레임의 위치를 초기화합니다.\n # if not moved:\n # cv2.moveWindow('frame', FRAME_X_ORIGIN, FRAME_Y_ORIGIN)\n # cv2.moveWindow('board', BOARD_X_ORIGIN, BOARD_Y_ORIGIN)\n # moved = True\n\n# 프로그램을 끝냅니다.\nif video is not None:\n video.release()\ncv2.destroyAllWindows()\n\ncnt = 0\nfor frame in frame_list:\n cv2.imshow('video_detection', frame)\n print(obj + \"나온 시간 : \" + str(frame_time[cnt]))\n cnt = cnt+1\n if cv2.waitKey(1000) & 0xFF == ord('q'):\n break" ]
[ [ "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "tensorflow.Session", "tensorflow.GraphDef", "numpy.array", "tensorflow.gfile.FastGFile" ] ]
madsbk/cuml
[ "fab74ca94fdbc5b49281660ce32a48cfd3d66f46" ]
[ "python/cuml/test/test_pickle.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nimport cuml\nfrom cuml.test.utils import array_equal\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import make_regression\nimport pickle\nfrom sklearn.manifold.t_sne import trustworthiness\n\nregression_models = dict(\n LinearRegression=cuml.LinearRegression(),\n Lasso=cuml.Lasso(),\n Ridge=cuml.Ridge(),\n ElasticNet=cuml.ElasticNet()\n)\n\nsolver_models = dict(\n CD=cuml.CD(),\n SGD=cuml.SGD(eta0=0.005)\n)\n\ncluster_models = dict(\n KMeans=cuml.KMeans()\n)\n\ndecomposition_models = dict(\n PCA=cuml.PCA(),\n TruncatedSVD=cuml.TruncatedSVD(),\n)\n\n\ndecomposition_models_xfail = dict(\n GaussianRandomProjection=cuml.GaussianRandomProjection(),\n SparseRandomProjection=cuml.SparseRandomProjection()\n)\n\nneighbor_models = dict(\n NearestNeighbors=cuml.NearestNeighbors()\n)\n\ndbscan_model = dict(\n DBSCAN=cuml.DBSCAN()\n)\n\numap_model = dict(\n UMAP=cuml.UMAP()\n)\n\n\ndef unit_param(*args, **kwargs):\n return pytest.param(*args, **kwargs, marks=pytest.mark.unit)\n\n\ndef quality_param(*args, **kwargs):\n return pytest.param(*args, **kwargs, marks=pytest.mark.quality)\n\n\ndef stress_param(*args, **kwargs):\n return pytest.param(*args, **kwargs, marks=pytest.mark.stress)\n\n\ndef pickle_save_load(tmpdir, model):\n pickle_file = tmpdir.join('cu_model.pickle')\n\n try:\n with open(pickle_file, 'wb') as pf:\n pickle.dump(model, pf)\n except (TypeError, ValueError) as e:\n pf.close()\n pytest.fail(e)\n\n with open(pickle_file, 'rb') as pf:\n cu_after_pickle_model = pickle.load(pf)\n\n return cu_after_pickle_model\n\n\ndef make_dataset(datatype, nrows, ncols):\n train_rows = np.int32(nrows*0.8)\n X, y = make_regression(n_samples=nrows, n_features=ncols,\n random_state=0)\n X_test = np.asarray(X[train_rows:, :]).astype(datatype)\n X_train = np.asarray(X[:train_rows, :]).astype(datatype)\n y_train = np.asarray(y[:train_rows, ]).astype(datatype)\n\n return X_train, y_train, X_test\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('model', regression_models.values())\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\ndef test_regressor_pickle(tmpdir, datatype, model, nrows, ncols):\n X_train, y_train, X_test = make_dataset(datatype, nrows, ncols)\n\n model.fit(X_train, y_train)\n cu_before_pickle_predict = model.predict(X_test).to_array()\n\n cu_after_pickle_model = pickle_save_load(tmpdir, model)\n\n cu_after_pickle_predict = cu_after_pickle_model.predict(X_test).to_array()\n\n assert array_equal(cu_before_pickle_predict, cu_after_pickle_predict)\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('model', solver_models.values())\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\ndef test_solver_pickle(tmpdir, datatype, model, nrows, ncols):\n X_train, y_train, X_test = make_dataset(datatype, nrows, ncols)\n\n 
model.fit(X_train, y_train)\n cu_before_pickle_predict = model.predict(X_test).to_array()\n\n cu_after_pickle_model = pickle_save_load(tmpdir, model)\n\n cu_after_pickle_predict = cu_after_pickle_model.predict(X_test).to_array()\n\n assert array_equal(cu_before_pickle_predict, cu_after_pickle_predict)\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('model', cluster_models.values())\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\ndef test_cluster_pickle(tmpdir, datatype, model, nrows, ncols):\n X_train, _, X_test = make_dataset(datatype, nrows, ncols)\n\n model.fit(X_train)\n cu_before_pickle_predict = model.predict(X_test).to_array()\n\n cu_after_pickle_model = pickle_save_load(tmpdir, model)\n\n cu_after_pickle_predict = cu_after_pickle_model.predict(X_test).to_array()\n\n assert array_equal(cu_before_pickle_predict, cu_after_pickle_predict)\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('model', decomposition_models_xfail.values())\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\[email protected]\ndef test_decomposition_pickle(tmpdir, datatype, model, nrows,\n ncols):\n X_train, _, _ = make_dataset(datatype, nrows, ncols)\n\n cu_before_pickle_transform = model.fit_transform(X_train)\n\n cu_after_pickle_model = pickle_save_load(tmpdir, model)\n\n cu_after_pickle_transform = cu_after_pickle_model.transform(X_train)\n\n assert array_equal(cu_before_pickle_transform, cu_after_pickle_transform)\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('model', umap_model.values())\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\ndef test_umap_pickle(tmpdir, datatype, model, nrows, ncols):\n\n iris = load_iris()\n iris_selection = np.random.RandomState(42).choice(\n [True, False], 150, replace=True, p=[0.75, 0.25])\n X_train = iris.data[iris_selection]\n\n cu_before_pickle_transform = model.fit_transform(X_train)\n\n cu_before_embed = model.arr_embed\n\n cu_trust_before = trustworthiness(X_train,\n cu_before_pickle_transform, 10)\n\n cu_after_pickle_model = pickle_save_load(tmpdir, model)\n\n cu_after_pickle_transform = cu_after_pickle_model.transform(X_train)\n\n cu_after_embed = model.arr_embed\n\n cu_trust_after = trustworthiness(X_train, cu_after_pickle_transform, 10)\n\n assert array_equal(cu_before_embed, cu_after_embed)\n assert cu_trust_after >= cu_trust_before - 0.2\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('model', decomposition_models.values())\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\[email protected]\ndef test_decomposition_pickle_xfail(tmpdir, datatype, model, nrows, ncols):\n X_train, _, _ = make_dataset(datatype, nrows, ncols)\n\n cu_before_pickle_transform = model.fit_transform(X_train)\n\n cu_after_pickle_model = pickle_save_load(tmpdir, model)\n\n cu_after_pickle_transform = cu_after_pickle_model.transform(X_train)\n\n assert array_equal(cu_before_pickle_transform, cu_after_pickle_transform)\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('model', neighbor_models.values())\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\[email protected]('k', [unit_param(3)])\ndef test_neighbors_pickle(tmpdir, datatype, model, nrows,\n ncols, k):\n X_train, _, X_test = make_dataset(datatype, nrows, ncols)\n\n model.fit(X_train)\n 
D_before, I_before = model.kneighbors(X_test, k=k)\n\n cu_after_pickle_model = pickle_save_load(tmpdir, model)\n\n D_after, I_after = cu_after_pickle_model.kneighbors(X_test, k=k)\n\n assert array_equal(D_before, D_after)\n assert array_equal(I_before, I_after)\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\[email protected]('k', [unit_param(3)])\ndef test_neighbors_pickle_nofit(tmpdir, datatype, nrows, ncols, k):\n\n \"\"\"\n Note: This test digs down a bit far into the\n internals of the implementation, but it's\n important that regressions do not occur\n from changes to the class.\n \"\"\"\n\n model = cuml.neighbors.NearestNeighbors()\n\n unpickled = pickle_save_load(tmpdir, model)\n\n state = unpickled.__dict__\n\n print(str(state))\n\n assert state[\"n_indices\"] == 0\n assert \"X_m\" not in state\n assert state[\"sizes\"] is None\n assert state[\"input\"] is None\n\n X_train, _, X_test = make_dataset(datatype, nrows, ncols)\n\n model.fit(X_train)\n\n unpickled = pickle_save_load(tmpdir, model)\n\n state = unpickled.__dict__\n\n assert state[\"n_indices\"] == 1\n assert \"X_m\" in state\n assert state[\"sizes\"] is not None\n assert state[\"input\"] is not None\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\[email protected]('k', [unit_param(3)])\[email protected](strict=True)\ndef test_neighbors_mg_fails(tmpdir, datatype, nrows, ncols, k):\n\n model = cuml.neighbors.NearestNeighbors()\n model.n_indices = 2\n\n pickle_save_load(tmpdir, model)\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('model', dbscan_model.values())\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\ndef test_dbscan_pickle(tmpdir, datatype, model, nrows, ncols):\n X_train, _, _ = make_dataset(datatype, nrows, ncols)\n\n cu_before_pickle_predict = model.fit_predict(X_train).to_array()\n\n cu_after_pickle_model = pickle_save_load(tmpdir, model)\n\n cu_after_pickle_predict = cu_after_pickle_model.fit_predict(\n X_train\n ).to_array()\n\n assert array_equal(cu_before_pickle_predict, cu_after_pickle_predict)\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('nrows', [unit_param(20)])\[email protected]('ncols', [unit_param(3)])\ndef test_tsne_pickle(tmpdir, datatype, nrows, ncols):\n iris = load_iris()\n iris_selection = np.random.RandomState(42).choice(\n [True, False], 150, replace=True, p=[0.75, 0.25])\n X = iris.data[iris_selection]\n\n model = cuml.manifold.TSNE(n_components=2, random_state=199)\n\n # Pickle the model\n model_pickle = pickle_save_load(tmpdir, model)\n model_params = model_pickle.__dict__\n if \"handle\" in model_params:\n del model_params[\"handle\"]\n\n # Confirm params in model are identical\n new_keys = set(model_params.keys())\n for key, value in zip(model_params.keys(), model_params.values()):\n assert (model_params[key] == value)\n new_keys -= set([key])\n\n # Check all keys have been checked\n assert(len(new_keys) == 0)\n\n # Transform data\n model.fit(X)\n trust_before = trustworthiness(X, model.Y, 10)\n\n # Save model + embeddings\n model = pickle_save_load(tmpdir, model)\n trust_after = trustworthiness(X, model.Y.to_pandas(), 10)\n\n assert trust_before == trust_after\n" ]
[ [ "numpy.asarray", "numpy.int32", "sklearn.datasets.load_iris", "sklearn.datasets.make_regression", "numpy.random.RandomState", "sklearn.manifold.t_sne.trustworthiness" ] ]
waldemarmeier/easy-efficientdet
[ "1329f497fdfd0542ee0db074020c4db351b1aa57" ]
[ "easy_efficientdet/factory.py" ]
[ "import json\nimport traceback\nfrom inspect import isgeneratorfunction\nfrom typing import Optional, Sequence, Tuple, Union\n\nimport tensorflow as tf\n\nfrom easy_efficientdet._third_party.training import CosineLrSchedule\nfrom easy_efficientdet.anchors import generate_anchor_boxes\nfrom easy_efficientdet.config import ObjectDetectionConfig\nfrom easy_efficientdet.data.preprocessing import (\n TFDATA_AUTOTUNE,\n build_data_pipeline,\n create_image_generator,\n load_tfrecords,\n parse_od_record,\n)\nfrom easy_efficientdet.inference import build_inference_model\nfrom easy_efficientdet.losses import ObjectDetectionLoss\nfrom easy_efficientdet.model import EfficientDet\nfrom easy_efficientdet.quantization import ExportModel, OptimzationType, quantize\nfrom easy_efficientdet.utils import (\n DataSplit,\n ImageDataGenertor,\n LabelMapType,\n setup_default_logger,\n)\n\nlogger = setup_default_logger(\"efficientdet-factory\")\n\n\nclass EfficientDetFactory:\n def __init__(self, config: ObjectDetectionConfig):\n self.config = config\n self._dist_strategy = None\n\n def reset_dist_strategy(self, ) -> None:\n self._dist_strategy = None\n\n @property\n def dist_strategy(self, ) -> tf.distribute.MirroredStrategy:\n logger.info(\"using mirrored strategy for mutli GPU training\")\n if self._dist_strategy is None:\n self._dist_strategy = tf.distribute.MirroredStrategy()\n logger.info(f\"created new mirrored strategy scope {self._dist_strategy}\")\n else:\n logger.info(\"using existing mirrored strategy scope \"\n f\"{self._dist_strategy}\")\n return self._dist_strategy\n\n def build_model(self) -> tf.keras.Model:\n\n if self.config.multi_gpu:\n with self.dist_strategy.scope():\n return EfficientDet(**self.config.get_model_config())\n else:\n return EfficientDet(**self.config.get_model_config())\n\n def restore_from_checkpoint(\n self,\n model: tf.keras.Model,\n checkpoint_dir: str,\n mult_checkpoints_dir: bool = True,\n ) -> None:\n\n if mult_checkpoints_dir:\n path_latest_chpkt = tf.train.latest_checkpoint(checkpoint_dir)\n if path_latest_chpkt is None:\n raise Exception(\"No valid checkpoint found in directory \"\n f\"{checkpoint_dir}\")\n\n else:\n path_latest_chpkt = checkpoint_dir\n\n logger.info(f\"using checkpoint with path {path_latest_chpkt}\")\n\n if self.config.multi_gpu is True:\n with self.dist_strategy.scope():\n self._restore_checkpoint(model, path_latest_chpkt)\n else:\n self._restore_checkpoint(model, path_latest_chpkt)\n\n def _restore_checkpoint(\n self,\n model: tf.keras.Model,\n checkpoint_path: str,\n ) -> None:\n checkpoint = tf.train.Checkpoint(model)\n try:\n checkpoint.restore(checkpoint_path).assert_consumed()\n except AssertionError:\n logger.warning(traceback.format_exc())\n logger.warning(\"an error occurred during restore of checkpoint \"\n f\"{checkpoint_path}. 
Usually, issues with \"\n \"'save_counter' variable can be ignored.\")\n\n def build_data_pipeline(\n self,\n data_split: Union[DataSplit, str],\n auto_train_data_size: bool = True,\n ) -> Union[tf.data.Dataset, Tuple[tf.data.Dataset]]:\n\n if data_split in (DataSplit.TRAIN, DataSplit.TRAIN_VAL):\n if (not auto_train_data_size) and (self.config.train_data_size is None):\n logger.warning(\n \"Training data size is neither inferred nor set in config\")\n\n if data_split == DataSplit.TRAIN_VAL:\n\n if self.config.train_data_path is not None \\\n and self.config.val_data_path is not None:\n train_data, val_data = build_data_pipeline(self.config, data_split,\n auto_train_data_size)\n else:\n raise ValueError(f\"For data split {data_split} 'train_data_path' and \"\n \"'val_data_path' properties have to be set\")\n\n if auto_train_data_size:\n _cardinality_num = \\\n train_data.cardinality().numpy() * self.config.batch_size\n self.config._update_train_data_size(_cardinality_num)\n\n return train_data, val_data\n\n elif data_split == DataSplit.TRAIN:\n\n train_data = build_data_pipeline(self.config, DataSplit.TRAIN)\n if auto_train_data_size:\n _cardinality_num = \\\n train_data.cardinality().numpy() * self.config.batch_size\n self.config._update_train_data_size(_cardinality_num)\n return train_data\n elif data_split == DataSplit.VALIDATION:\n return build_data_pipeline(self.config, DataSplit.VALIDATION)\n elif data_split == DataSplit.TEST:\n raise NotImplementedError(\"test data split is not implemented\")\n\n def build_data_eval(\n self,\n path: str = None,\n tfrecord_suffix: str = None,\n ) -> tf.data.Dataset:\n\n if path is None:\n path = self.config.val_data_path\n logger.info(\n f\"using default eval data path from config {self.config.val_data_path}\")\n if tfrecord_suffix is None:\n logger.info(\"using default tfrecord_suffix from config \"\n f\"{self.config.tfrecord_suffix}\")\n tfrecord_suffix = self.config.tfrecord_suffix\n\n data = load_tfrecords(path, tfrecord_suffix)\n data = data.map(parse_od_record, TFDATA_AUTOTUNE)\n return data\n\n def create_optimizer(self, ) -> tf.keras.optimizers.Optimizer:\n\n if isinstance(self.config.learning_rate, float):\n logger.warning(\"Setting learning rate to a constant value is not \"\n \"recommended\")\n learning_rate = self.config.learning_rate\n elif self.config.learning_rate == \"auto\":\n if self.config.train_data_size is None:\n # TODO should throw an exception here, when data is not initalized\n # this fails\n logger.warning(\"If learning rate is set to 'auto' training data size \"\n \"should be known\")\n raise ValueError(\"with learning rate_schedule 'auto' training data\"\n \" size should be known. 
You can either set the \"\n \"property 'train_data_size' in the configuration \"\n \"or initialize the training data using this \"\n \"factoary\")\n learning_rate = CosineLrSchedule.get_effdet_lr_scheduler(\n self.config.train_data_size, self.config.batch_size, self.config.epochs)\n\n if self.config.multi_gpu:\n with self.dist_strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,\n momentum=self.config.momentum,\n decay=self.config.weight_decay)\n else:\n optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,\n momentum=self.config.momentum,\n decay=self.config.weight_decay)\n\n return optimizer\n\n def create_loss_fn(self, ) -> tf.keras.losses.Loss:\n return ObjectDetectionLoss(**self.config.get_loss_config())\n\n def create_anchor_boxes(self, ):\n return generate_anchor_boxes(**self.config.get_anchor_box_config())\n\n def load_labelmap(self, path: str) -> LabelMapType:\n with open(path) as fp:\n return json.load(fp)\n\n def quantize_model(\n self,\n model: tf.keras.Model,\n filename: str,\n score_thresh: float = .01,\n iou_thresh: float = .5,\n max_detections: int = 100,\n image_shape: Sequence[int] = None,\n opt_type: OptimzationType = OptimzationType.FLOAT32,\n representative_dataset: Optional[Union[DataSplit, ImageDataGenertor]] = None,\n size_limit: Optional[int] = None,\n ) -> bytes:\n\n if \"decode\" in model.layers[-1].name.lower():\n logger.warning(\"provide an object detection model without final \"\n \"detection layer for NMS because this method \"\n \"provides its own tflite compatible NMS implementation\")\n\n if image_shape is None:\n image_shape = self.config.image_shape\n logger.info(f\"Using image_shape {image_shape} from config\")\n\n anchors = self.create_anchor_boxes()\n # normalize anchors\n anchors = anchors / [*self.config.image_shape[:2], *self.config.image_shape[:2]]\n export_model = ExportModel(self.config.num_cls, iou_thresh, score_thresh,\n max_detections, model, anchors)\n\n if opt_type not in OptimzationType:\n raise ValueError(f\"opt_type {opt_type} is not. 
\"\n f\"Should be in {OptimzationType.valid_types()}\")\n\n quant_data = None\n if opt_type == OptimzationType.INT8:\n if isinstance(representative_dataset, str):\n if representative_dataset not in \\\n (DataSplit.TRAIN, DataSplit.VALIDATION):\n raise ValueError(\"ony supports train or val datasets\")\n\n if representative_dataset == DataSplit.TRAIN:\n data_path = self.config.train_data_path\n elif representative_dataset == DataSplit.VALIDATION:\n data_path = self.config.val_data_path\n\n quant_data = create_image_generator(data_path, image_shape[:2],\n self.config.tfrecord_suffix,\n size_limit)\n\n elif isgeneratorfunction(representative_dataset):\n quant_data = representative_dataset\n else:\n raise ValueError(\"For optimzation type int8 a representative_dataset \"\n \"has to be provided which is either a dataset \"\n \"from config (train/val) or a generator function \"\n \"for a dataset\")\n\n logger.info(f\"starting quantization of type {opt_type}\")\n\n return quantize(export_model, opt_type, image_shape, quant_data, filename)\n\n @staticmethod\n def build_inference_model(\n model: tf.keras.Model,\n num_cls: int,\n image_shape: Sequence[int] = (512, 512, 3),\n confidence_threshold: float = 0.05,\n nms_iou_threshold: float = 0.5,\n max_detections_per_class: int = 100,\n max_detections: int = 100,\n box_variance: Optional[Sequence[float]] = None,\n resize: bool = False,\n ) -> tf.keras.Model:\n # TODO use parameters from config as default values\n #\n return build_inference_model(model, num_cls, image_shape, confidence_threshold,\n nms_iou_threshold, max_detections_per_class,\n max_detections, box_variance, resize)\n" ]
[ [ "tensorflow.train.Checkpoint", "tensorflow.train.latest_checkpoint", "tensorflow.keras.optimizers.SGD", "tensorflow.distribute.MirroredStrategy" ] ]
namiwa/QFHousing
[ "cbe65902ee42e1c0be09f4d5e255bcf2b404ef6a" ]
[ "housing/ModelChecking.py" ]
[ "import seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nfrom scipy import stats\nfrom housing.ML import ML_Model\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import Lasso, BayesianRidge\n\ndef statistical_plots(fitted_y, y):\n #Predicted vs Actual\n plot_1 = sns.scatterplot(fitted_y,y)\n plot_1.set_title('Actual vs Predicted')\n plot_1.set_xlabel('Actual values')\n plot_1.set_ylabel('Predicted');\n\n \n #Residual vs Fitted\n plot_2 = plt.figure()\n plot_2 = sns.residplot(fitted_y, y,\n scatter_kws={'alpha': 0.5},\n line_kws={'color': 'red', 'lw': 1, 'alpha': 0.8})\n plot_2.set_title('Residuals vs Fitted')\n plot_2.set_xlabel('Fitted values')\n plot_2.set_ylabel('Residuals');\n\n #QQPlot\n res = pd.DataFrame(y - fitted_y)\n plot_3 = sm.qqplot(res, stats.t, distargs=(4,), line='s');\n\ndef ttest(model, X):\n '''\n model refers to ML_Model\n X refers X_train\n '''\n ML_model = model.get_model()\n if (isinstance(ML_model, Lasso) or isinstance(ML_model, BayesianRidge)):\n coeffs = np.append(ML_model.intercept_, ML_model.coef_)\n elif (isinstance(ML_model, RandomForestRegressor)):\n coeffs = ML_model.estimators_\n else:\n return \"Wrong ML Model\"\n MSE = model.get_MSE()\n newX = np.append(np.ones((len(X),1)), X, axis=1)\n\n var = np.linalg.inv(np.dot(newX.T,newX))\n standard_error = np.sqrt(MSE*(var.diagonal()))\n test_statistic = coeffs / standard_error\n\n p_values =[2*(1-stats.t.cdf(np.abs(i),(len(newX)-1))) for i in test_statistic]\n\n standard_error = np.round(standard_error, 3)\n test_statistic = np.round(test_statistic, 3)\n p_values = np.round(p_values, 3)\n coeffs = np.round(coeffs, 3)\n\n ttestDF = pd.DataFrame({'Coefficients': coeffs})\n ttestDF['Standard Errors'] = standard_error\n ttestDF['t values'] = test_statistic\n ttestDF['Probabilities'] = p_values\n return ttestDF\n\ndef AIC_BIC(fitted_y, y, k):\n '''\n k refers to k number of variables in model\n '''\n EPSILON = 1e-4 #if SSE is very small\n resid = y - fitted_y\n SSE = sum(resid**2) + EPSILON\n n = len(fitted_y)\n AIC = 2*k - 2*np.log(SSE)\n BIC = n*np.log(SSE/n) + k*np.log(n)\n # \"AIC: \"+ str(AIC) + \" BIC: \" + str(BIC)\n return AIC, BIC\n " ]
[ [ "numpy.dot", "numpy.log", "numpy.abs", "pandas.DataFrame", "numpy.round", "numpy.append", "matplotlib.pyplot.figure" ] ]
ashish-code/structured-sparsity-visual-classification
[ "49e7bd24dab8d4f6e5b012ab333c6012e7404b84" ]
[ "util/writefeaturevectorVOC2010.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# writefeaturevectorVOC2010.py\n# \n\nimport os\nimport string\nimport numpy as np\nimport csv\n\ndef main():\n\trootDir = '/vol/vssp/diplecs/ash/Data/'\n\tdataSets = ['VOC2010']\n\timageSets = '/ImageSets/Main/'\n\toutDir = '/FeatureMatrix/'\n\tinDir = '/Feature/'\n\tftype = '.sift'\n\t#loop through each dataset\n\tfor dataset in dataSets:\n\t\tfcatid=rootDir+dataset+'/'+'catidlist.txt'\n\t\tcatnames=np.genfromtxt(fcatid,usecols=[0],dtype='string',delimiter=',')\n\t\tcatids=np.genfromtxt(fcatid,usecols=[1],dtype='int',delimiter=',')\n\t\tcats = dict(zip(catnames,catids))\n\t\tfor catname in cats.keys():\n\t\t\toutfilename = rootDir+dataset+outDir+catname+ftype\n\t\t\toutfile = open(outfilename,'w')\n\t\t\tcatimgsetfilename = rootDir+dataset+imageSets+catname+'_trainval.txt'\n\t\t\timgnames=np.genfromtxt(catimgsetfilename,usecols=[0],dtype='string')\n\t\t\timgids=np.genfromtxt(catimgsetfilename,usecols=[1],dtype='int')\n\t\t\timages=dict(zip(imgnames,imgids))\n\t\t\tfor imgname in images:\n\t\t\t\tif(images.get(imgname)==1):\n\t\t\t\t\t#open the imgname file and write to the outfile\n\t\t\t\t\tfeatfilename=rootDir+dataset+inDir+imgname+ftype\n\t\t\t\t\tfeatdata=np.loadtxt(featfilename,delimiter=' ',skiprows=2,\\\n\t\t\t\t\tdtype=np.int,usecols=np.concatenate((np.arange(0,2),np.arange(5,133))))\n\t\t\t\t\timgnameX = str(imgname.split('_')[0])[2:4]\n\t\t\t\t\timgnameY = str(imgname.split('_')[1])[2:6]\n\t\t\t\t\timgnameID = imgnameX+imgnameY\n\t\t\t\t\tfor vector in featdata:\n\t\t\t\t\t\tfor elem in vector:\n\t\t\t\t\t\t\toutfile.write('%d ' % elem)\t\t\n\t\t\t\t\t\toutfile.write('%s %d\\n' % (imgnameID,cats.get(catname)))\n\t\t\t\t\tprint(dataset + ' : ' + catname + ' : ' + imgnameID)\n\t\t\toutfile.close()\n\treturn 0\n\nif __name__ == '__main__':\n\tmain()\n\n" ]
[ [ "numpy.arange", "numpy.genfromtxt" ] ]
istiakshihab/api-covid19-rt-bd
[ "10d059bb28b7955ddac3c77a52376ee7bc5900a9" ]
[ "api/rt_data_processing.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom scipy import stats as sps\n\ndef prepare_cases(cases, cutoff=5):\n new_cases = cases.diff()\n\n smoothed = new_cases.rolling(7,\n win_type='gaussian',\n min_periods=1,\n center=True).mean(std=2).round()\n \n idx_start = np.searchsorted(smoothed, cutoff)\n \n smoothed = smoothed.iloc[idx_start:]\n original = new_cases.loc[smoothed.index]\n \n return original, smoothed\n\ndef prepare_data(fileLoc,sheetLoc):\n datasetxl = pd.read_excel(fileLoc,sheetLoc)\n datasetxl = datasetxl.dropna(how='all', axis='columns')\n datasetxl = datasetxl.loc[:, ~datasetxl.columns.str.contains('^Unnamed',na=False)]\n datasetxl = datasetxl.drop([0,67,68,69])\n datasetxl.to_csv(\"static/data/dataset.csv\")\n datasetxl = pd.read_csv(\"static/data/dataset.csv\",header=None)\n datasetxl = datasetxl.transpose()\n datasetxl = datasetxl.drop([0,1])\n datasetxl = datasetxl.reindex(index=datasetxl.index[::-1])\n datasetxl.columns=[\"Date\",\"B. Baria\",\"Bagerhat\",\"Bandarban\",\"Barguna\",\"Barisal\",\"Bhola\",\"Bogra\",\"Chandpur\",\"Chapainawabganj\",\"Chattogram\",\"Chuadanga\",\"Cox’s bazar\",\"Cumilla\",\"Dhaka (District)\",\"Dhaka City\",\"Dinajpur\",\"Faridpur\",\"Feni\",\"Gaibandha\",\"Gazipur\",\"Gopalganj\",\"Habiganj\",\"Jamalpur\",\"Jessore\",\"Jhalokathi\",\"Jhenaidah\",\"Joypurhat\",\"Khagrachhari\",\"Khulna\",\"Kishoreganj\",\"Kurigram\",\"Kushtia\",\"Lakshmipur\",\"Lalmonirhat\",\"Madaripur\",\"Magura\",\"Manikganj\",\"Meherpur\",\"Moulvibazar\",\"Munshiganj\",\"Mymensingh\",\"Naogaon\",\"Narail\",\"Narayanganj\",\"Narsingdi\",\"Natore\",\"Netrokona\",\"Nilphamari\",\"Noakhali\",\"Pabna\",\"Panchagarh\",\"Pirojpur\",\"Potuakhali\",\"Rajbari\",\"Rajshahi\",\"Rangamati\",\"Rangpur\",\"Satkhira\",\"Shariatpur\",\"Sherpur\",\"Sirajganj\",\"Sunamganj\",\"Sylhet\",\"Tangail\",\"Thakurgaon\",\"total\"]\n datasetxl['Date'] = pd.to_datetime(datasetxl['Date']).dt.strftime('%Y-%m-%d')\n districts = ['B. 
Baria','Bagerhat','Bandarban','Barguna','Barisal','Bhola','Bogra','Chandpur','Chapainawabganj','Chattogram','Chuadanga','Cox’s bazar','Cumilla','Dhaka (District)','Dhaka City','Dinajpur','Faridpur','Feni','Gaibandha','Gazipur','Gopalganj','Habiganj','Jamalpur','Jessore','Jhalokathi','Jhenaidah','Joypurhat','Khagrachhari','Khulna','Kishoreganj','Kurigram','Kushtia','Lakshmipur','Lalmonirhat','Madaripur','Magura','Manikganj','Meherpur','Moulvibazar','Munshiganj','Mymensingh','Naogaon','Narail','Narayanganj','Narsingdi','Natore','Netrokona','Nilphamari','Noakhali','Pabna','Panchagarh','Pirojpur','Potuakhali','Rajbari','Rajshahi','Rangamati','Rangpur','Satkhira','Shariatpur','Sherpur','Sirajganj','Sunamganj','Sylhet','Tangail','Thakurgaon','total']\n datasetxl[districts] = datasetxl[districts].fillna(0.0)\n datasetxl[districts] = datasetxl[districts].apply(pd.to_numeric, errors='coerce')\n datasetxl[districts] = datasetxl[districts].cumsum()\n return datasetxl\n\ndef get_posteriors(sr, GAMMA, r_t_range, sigma=0.15):\n\n # (1) Calculate Lambda\n # Map Rt into lambda so we can substitute it into the equation below\n # Note that we have N-1 lambdas because on the first day of an outbreak\n # you do not know what to expect.\n lam = sr[:-1].values * np.exp(GAMMA * (r_t_range[:, None] - 1))\n\n # (2) Calculate each day's likelihood\n likelihoods = pd.DataFrame(\n data = sps.poisson.pmf(sr[1:].values, lam),\n index = r_t_range,\n columns = sr.index[1:])\n \n # (3) Create the Gaussian Matrix\n process_matrix = sps.norm(loc=r_t_range,\n scale=sigma\n ).pdf(r_t_range[:, None]) \n\n # (3a) Normalize all rows to sum to 1\n process_matrix /= process_matrix.sum(axis=0)\n \n # (4) Calculate the initial prior\n #prior0 = sps.gamma(a=4).pdf(r_t_range)\n prior0 = np.ones_like(r_t_range)/len(r_t_range)\n prior0 /= prior0.sum()\n\n # Create a DataFrame that will hold our posteriors for each day\n # Insert our prior as the first posterior.\n posteriors = pd.DataFrame(\n index=r_t_range,\n columns=sr.index,\n data={sr.index[0]: prior0}\n )\n \n # We said we'd keep track of the sum of the log of the probability\n # of the data for maximum likelihood calculation.\n log_likelihood = 0.0\n \n # (5) Iteratively apply Bayes' rule\n for previous_day, current_day in zip(sr.index[:-1], sr.index[1:]):\n\n #(5a) Calculate the new prior\n current_prior = process_matrix @ posteriors[previous_day]\n \n #(5b) Calculate the numerator of Bayes' Rule: P(k|R_t)P(R_t)\n numerator = likelihoods[current_day] * current_prior\n \n #(5c) Calcluate the denominator of Bayes' Rule P(k)\n denominator = np.sum(numerator)\n \n # Execute full Bayes' Rule\n posteriors[current_day] = numerator/denominator\n \n # Add to the running sum of log likelihoods\n log_likelihood += np.log(denominator)\n \n return posteriors, log_likelihood\n\n#hide_input\n# Calculate High density interval\ndef highest_density_interval(pmf, p=.9, debug=False):\n # If we pass a DataFrame, just call this recursively on the columns\n if(isinstance(pmf, pd.DataFrame)):\n return pd.DataFrame([highest_density_interval(pmf[col], p=p) for col in pmf],\n index=pmf.columns)\n \n cumsum = np.cumsum(pmf.values)\n \n # N x N matrix of total probability mass for each low, high\n total_p = cumsum - cumsum[:, None]\n \n # Return all indices with total_p > p\n lows, highs = (total_p > p).nonzero()\n \n # Find the smallest range (highest density)\n best = (highs - lows).argmin()\n \n low = pmf.index[lows[best]]\n high = pmf.index[highs[best]]\n \n return pd.Series([low, high],\n 
index=[f'Low_{p*100:.0f}',\n f'High_{p*100:.0f}'])\n\n\ndef rolling_mean(dataframe):\n for column in dataframe:\n if(column != \"Date\"):\n dataframe[column] = dataframe[column].rolling(7).mean()\n return dataframe" ]
[ [ "numpy.log", "pandas.read_excel", "pandas.read_csv", "pandas.Series", "numpy.ones_like", "pandas.to_datetime", "numpy.cumsum", "pandas.DataFrame", "scipy.stats.norm", "numpy.searchsorted", "scipy.stats.poisson.pmf", "numpy.exp", "numpy.sum" ] ]
rbuxman/new-refactor
[ "c2254fa637fbe54a54e445a0a872d2d0bbe4165a" ]
[ "citrine_converters/tools/hough.py" ]
[ "from __future__ import division\n\nimport numpy as np\n\n\nclass HoughSpace(np.ndarray):\n __doc__ = r\"\"\"\n Constructs a Hough transform space of the `xdata` and\n `ydata`.\n\n Construct a Hough space that, given an orientation,\n determines the distance to a point.\n\n For $ax + by + c = 0$, $a = \\sin \\phi$ and $b = \\cos \\phi$,\\*\n\n $$\n d = \\frac{|a x_0 + b y_0 + c|}{\\sqrt{a^2 + b^2}}\n $$\n\n Then the distance from a line oriented at $\\phi$ at\n the origin to a point $(x_0, y_0)$ is\n\n $$\n d = |x_0 \\sin \\phi + y_0 \\cos \\phi|\n $$\n\n *Note* The Hough transform can be performed on the scaled\n data, not the original data, because of the extreme\n compression of the Hough space that is a consequence of\n the highly disparate x and y axes.\n\n \\* This can be cast into a more familiar form:\n\n $$\n \\begin{align*}\n ax + by + c &= 0 \\\\\n y &= -\\frac{a}{b} x - \\frac{c}{b} \\\\\n &= -\\frac{\\sin (\\phi)}{\\cos (\\phi)} x - \\frac{c}{\\cos (\\phi)} \\\\\n &= \\frac{\\sin (-\\phi)}{\\cos (-\\phi)} x - \\frac{c}{\\cos (-\\phi)} \\\\\n &= \\tan (\\pi-\\phi) x + \\frac{c}{\\cos(\\pi - \\phi)} \\\\\n &= \\tan (\\theta) x + \\frac{c}{\\cos(\\theta)} \\\\\n y &= m x + b\n \\end{align*}\n $$\n\n $+\\phi$ is counterclockwise.\n\n Input\n =====\n :xdata, array-like: x data\n :ydata, array-like: y data\n\n Options\n =======\n :nq, int (optional): number of theta divisions.\n Default: 1801.\n :nq, int (optional): number of radial divisions.\n Default: 1801.\n \"\"\"\n\n @staticmethod\n def distance(x, y, phi):\n \"\"\"\n Shortest distance between the origin and the line that\n forms an angle $\\phi$ with the x-axis and passes through\n the point (x,y).\n\n IN\n ==\n :x, float or ndarray: x coordinate(s)\n :y, float or ndarray: y coordinate(s)\n :phi, float or ndarray: angle(s) of the line(s)\n that pass through (x,y).\n \"\"\"\n return np.abs(x*np.sin(phi) + y*np.cos(phi))\n\n def __new__(cls, xdata, ydata, **kwds):\n # handle options\n nq = kwds.get('nq', 1801)\n nr = kwds.get('nr', 1801)\n # set number of theta divisions\n try:\n nq = int(nq)\n except ValueError:\n msg = 'The number of theta divisions must be an integer.'\n raise ValueError(msg)\n # set number of radial divisions\n try:\n nr = int(nr)\n except ValueError:\n msg = 'The number of radial divisions must be an integer.'\n raise ValueError(msg)\n # initialize the hough space\n obj = np.zeros((nq, nr), dtype=int).view(cls)\n obj.theta = (0, np.pi)\n obj.radius = (0, 1)\n obj.nq = nq\n obj.nr = nr\n # build conditions based on options\n if not isinstance(xdata, np.ndarray):\n # why not just use asarray? in case xdata is a subclass of\n # ndarray we don't want to construct a new ndarray view\n obj.x = np.asarray(xdata)\n else:\n obj.x = xdata\n if not isinstance(ydata, np.ndarray):\n # why not just use asarray? 
in case ydata is a subclass of\n # ndarray we don't want to construct a new ndarray view\n obj.y = np.asarray(ydata)\n else:\n obj.y = ydata\n # construct the hough space\n obj.construct()\n return obj\n\n def __array_finalize__(self, obj):\n if obj is None: return\n try:\n self.nq = getattr(obj, 'nq', obj.shape[0])\n except IndexError:\n self.nq = 0\n try:\n self.nr = getattr(obj, 'nr', obj.shape[1])\n except IndexError:\n self.nr = 0\n self.x = getattr(obj, 'x', np.array([], dtype=float))\n self.y = getattr(obj, 'y', np.array([], dtype=float))\n return obj\n\n def theta_distance(self, iq, ir):\n \"\"\"\n Returns the theta and distance values for a given coordinate\n in the Hough space.\n\n Input\n =====\n :iq, int: theta index for the point in the Hough space\n :ir, int: radius/distance index for the point in the Hough space.\n\n Output\n ======\n (theta, distance) as floats.\n \"\"\"\n qlo, qhi = self.theta\n rlo, rhi = self.radius\n theta = iq/self.nq * (qhi - qlo) + qlo\n distance = ir/self.nr * (rhi - rlo) + rlo\n return (theta, distance)\n\n def construct(self):\n \"\"\"\n Constructs the Hough space from the x and y point data\n stored as part of `self`.\n\n IN\n ==\n :self: this instance\n\n OUT\n ===\n None. `self.hough` is created/updated on this call.\n \"\"\"\n assert self.x.shape == self.y.shape, \\\n \"The shapes of the x and y vectors must match.\"\n nq = self.nq\n nr = self.nr\n # construct the Hough space\n #+ what range of theta and r are appropriate?\n #+ at worst, the line eminating from no point will be\n #+ farther away from the origin that the distance to the\n #+ point itself.\n radius = np.linspace(0,\n np.sqrt(self.x**2 + self.y**2).max(),\n num=nr-1)\n self.radius = (radius.min(), radius.max())\n #+ since each line extends in both directions from the point\n #+ there is only need to explore 180 degrees (pi radians)\n theta = np.linspace(0,\n np.pi,\n num=nq-1)\n self.theta = (theta.min(), theta.max())\n # see the doc string for the HoughSpace class for a detailed\n # description of the role of phi. In short, the theta from\n # the theta -> phi conversion is what one would expect from\n # $y = mx + b$ where $m = \\tan \\theta$ for $+\\theta$\n # counterclockwise.\n phi = np.pi - theta\n # with what indices do the theta values correspond?\n qlo, qhi = theta[0], theta[-1]\n iq = ((theta - qlo)/(qhi - qlo)*(nq-1)).astype(int)\n # range of the radial values\n rlo, rhi = radius[0], radius[-1]\n # populate the Hough space\n self.fill(0)\n for x,y in zip(self.x, self.y):\n # vectorized calculation of all distances. Note the\n # use of $\\phi$, not $\\theta$ in this equation. The\n # reason can be found in the HoughSpace doc string.\n d = (HoughSpace.distance(x, y, phi) - rlo)/(rhi - rlo)\n # To which index does each distance correspond\n ir = (d*(nr-1)).astype(int)\n self[iq, ir] += 1\n#end 'class HoughSpace(object):'\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.cos", "numpy.sin", "numpy.array", "numpy.zeros" ] ]
mprego/NBA
[ "6b065f0bafb72da2a9a8e4e01304e0097e6dfd77" ]
[ "Classification/NBA_API.py" ]
[ "# -*- coding: utf-8 -*-\n'''\nThis code contains methods for pulling data from NBA.com\nIt depends on the NBA_PY package\n'''\n\n#Imports Packages\nfrom nba_py import *\nfrom nba_py import team\nfrom nba_py import game\nfrom nba_py.constants import *\nfrom Dunks_experiment import *\nimport pandas as pd\nimport datetime as dt\nimport numpy as np\nimport time\n\n# Creates schedule of games from the NBA website, given a date range (inclusive)\n# Returns a DF where each row contains the game, teams, and score\ndef create_schedule(start_dt, end_dt):\n \n master_game_list=pd.DataFrame() #will eventually contain a row for each game\n \n for date in pd.date_range(start_dt, end_dt).tolist(): #iterates through given date range\n sb=Scoreboard(month=date.month, day=date.day, year=date.year)\n #time.sleep(1) #waits for 1 second\n ls=pd.DataFrame(sb.line_score())\n \n # game_list holds all games from the selected date\n game_list=pd.DataFrame(index=range(0,len(ls.index)/2), columns=['Game_ID', 'Date', 'Home Team', 'Home ID', 'Away Team', 'Away ID', 'Home Team Score', 'Away Team Score', 'Home EFG', 'Home TOV', 'Home ORB', 'Home FTFGA', 'Away EFG', 'Away TOV', 'Away ORB', 'Away FTFGA', 'Home Team Win'])\n \n for i in range(0,len(ls.index)/2): #iterates through each unique game (skips every other entry)\n home_index=2*i \n away_index=2*i+1\n game=ls.ix[2*i:(2*i+1),:] #holds the 2 rows corresponding to a single game\n game_list.ix[i,'Game_ID']=ls.ix[home_index,'GAME_ID']\n game_list.ix[i,'Date']=date\n game_list.ix[i,'Home Team']=ls.ix[home_index, 'TEAM_ABBREVIATION']\n game_list.ix[i,'Home ID']=ls.ix[home_index, 'TEAM_ID']\n game_list.ix[i,'Away Team']=ls.ix[away_index, 'TEAM_ABBREVIATION']\n game_list.ix[i,'Away ID']=ls.ix[away_index, 'TEAM_ID']\n game_list.ix[i,'Home Team Score']=sum(ls.ix[home_index,7:21]) #sums up all of the quarters/OTs\n game_list.ix[i,'Away Team Score']=sum(ls.ix[away_index,7:21])\n if game_list.ix[i,'Home Team Score']>game_list.ix[i,'Away Team Score']:\n game_list.ix[i, \"Home Team Win\"]=1\n else:\n game_list.ix[i, \"Home Team Win\"]=0\n master_game_list=master_game_list.append(game_list, ignore_index=True) #appends each game\n return master_game_list\n \n# Adds the 4 factors to the given dataset of games\n# Requires Game_ID, and Team_ID for Home Team \ndef add_factors(schedule):\n new_data=schedule.reset_index(drop=True)\n row=0\n for i in new_data.iterrows():\n game_ID='00'+str(int(new_data.ix[row,'Game_ID']))\n four_factors=game.BoxscoreFourFactors(game_ID)\n #time.sleep(1)\n factors_data=four_factors.sql_team_four_factors()\n home_ID=int(new_data.ix[row, 'Home ID'])\n if home_ID==factors_data.ix[0,'TEAM_ID']:\n h_index=0\n a_index=1\n else:\n a_index=0\n h_index=1\n new_data.ix[row,'Home EFG']=factors_data.ix[h_index,'EFG_PCT']\n new_data.ix[row,'Home TOV']=factors_data.ix[h_index,'TM_TOV_PCT']\n new_data.ix[row,'Home ORB']=factors_data.ix[h_index,'OREB_PCT']\n new_data.ix[row,'Home FTFGA']=factors_data.ix[h_index,'FTA_RATE']\n new_data.ix[row,'Away EFG']=factors_data.ix[a_index,'EFG_PCT']\n new_data.ix[row,'Away TOV']=factors_data.ix[a_index,'TM_TOV_PCT']\n new_data.ix[row,'Away ORB']=factors_data.ix[a_index,'OREB_PCT']\n new_data.ix[row,'Away FTFGA']=factors_data.ix[a_index,'FTA_RATE']\n row=row+1\n return new_data\n \n \n# Selects subset of schedule for last n games for a specified team\n# Requires Team_ID, current date (finds games before), number of games, and full schedule\ndef lastNGames(team_id, end_dt, n, schedule):\n home_last=schedule.ix[schedule['Home 
ID']==team_id] #filters for only games where specified team is involved\n away_last=schedule.ix[schedule['Away ID']==team_id]\n last=pd.concat([home_last, away_last])\n last['Date']=pd.to_datetime(last['Date']) #converts the date columns to datetime\n last=last[last['Date']<end_dt] #filters by games that occur before date specified\n last=last.sort_values('Date', ascending=False)\n last=last.reset_index(drop=True)\n length=min(n,len(last))\n lastn=pd.DataFrame(index=range(0,length), columns=['Date', 'Game ID', 'Team ID', 'Home', 'Score', 'Opp Score', 'Win', 'EFG', 'Opp EFG', 'TOV', 'Opp TOV', 'ORB', 'Opp ORB', 'FTFGA', 'Opp FTFGA', 'Dunk Score', 'Opp Dunk Score', 'Dunk Win']) \n for i in range(0,length):\n lastn.ix[i,'Date']=last.ix[i, 'Date']\n lastn.ix[i,'Team ID']=team_id\n lastn.ix[i,'Game ID']=last.ix[i,'Game_ID']\n if last.ix[i, 'Home ID']==team_id:\n lastn.ix[i,'Home']=1\n lastn.ix[i,'Score']=last.ix[i,'Home Team Score']\n lastn.ix[i,'Opp Score']=last.ix[i,'Away Team Score']\n lastn.ix[i,'EFG']=last.ix[i,'Home EFG']\n lastn.ix[i,'Opp EFG']=last.ix[i,'Away EFG']\n lastn.ix[i,'TOV']=last.ix[i,'Home TOV']\n lastn.ix[i,'Opp TOV']=last.ix[i,'Away TOV']\n lastn.ix[i,'ORB']=last.ix[i,'Home ORB']\n lastn.ix[i,'Opp ORB']=last.ix[i,'Away ORB']\n lastn.ix[i,'FTFGA']=last.ix[i,'Home FTFGA']\n lastn.ix[i,'Opp FTFGA']=last.ix[i,'Away FTFGA']\n lastn.ix[i,'Dunk Score']=last.ix[i,'h_dunk_score']\n lastn.ix[i,'Opp Dunk Score']=last.ix[i,'a_dunk_score']\n else:\n lastn.ix[i,'Home']=0\n lastn.ix[i,'Score']=last.ix[i,'Away Team Score']\n lastn.ix[i,'Opp Score']=last.ix[i,'Home Team Score']\n lastn.ix[i,'EFG']=last.ix[i,'Away EFG']\n lastn.ix[i,'Opp EFG']=last.ix[i,'Home EFG']\n lastn.ix[i,'TOV']=last.ix[i,'Away TOV']\n lastn.ix[i,'Opp TOV']=last.ix[i,'Home TOV']\n lastn.ix[i,'ORB']=last.ix[i,'Away ORB']\n lastn.ix[i,'Opp ORB']=last.ix[i,'Home ORB']\n lastn.ix[i,'FTFGA']=last.ix[i,'Away FTFGA']\n lastn.ix[i,'Opp FTFGA']=last.ix[i,'Home FTFGA']\n lastn.ix[i,'Dunk Score']=last.ix[i,'a_dunk_score']\n lastn.ix[i,'Opp Dunk Score']=last.ix[i,'h_dunk_score']\n if lastn.ix[i,'Score']>lastn.ix[i,'Opp Score']:\n lastn.ix[i,'Win']=1\n else:\n lastn.ix[i,'Win']=0\n if lastn.ix[i,'Dunk Score']>lastn.ix[i,'Opp Dunk Score']:\n lastn.ix[i,'Dunk Win']=1\n else:\n lastn.ix[i,'Dunk Win']=0\n return lastn\n\n\n# Returns stats for the last n games for a given team and game\n# Requires team, current date, game on current date, number of games to go back, and full schedule\ndef lastNStats(team_id, end_dt, game_id, n, schedule):\n lastngames=lastNGames(team_id, end_dt, n, schedule)\n lastnstats=pd.DataFrame(index=range(0,1), columns=['Game_ID', 'BTB', 'n games', 'Win Pct', 'EFG', 'Def EFG', 'TOV', 'Def TOV', 'ORB', 'Def ORB', 'FTFGA', 'Def FTFGA', 'Dunk Score', 'Def Dunk Score'])\n lastnstats.ix[0,'Game_ID']=game_id\n lastnstats.ix[0,'n games']=len(lastngames.ix[:,0])\n if lastnstats.ix[0,'n games'] ==0: #if there are no previous games\n lastnstats.ix[0, 'BTB']=0\n for c in range(0,11):\n lastnstats.ix[0,c+3]=0\n else:\n if (end_dt-lastngames.ix[0,'Date']).days==1:\n lastnstats.ix[0,'BTB']=1\n else:\n lastnstats.ix[0,'BTB']=0\n for c in range(0,11):\n lastnstats.ix[0,c+3]=np.mean(lastngames.ix[:,c+6])\n return lastnstats\n \n \n# Creates the 2014-2015 NBA schedule with the 4 factors added in\ndef create_2014_schedule():\n create_schedule(dt.datetime(2014,10,28), dt.datetime(2014,11,30)).to_csv('sched1.csv', index=False)\n sched1=add_factors(pd.read_csv('sched1.csv'))\n sched1.to_csv('sched1.csv', index=False)\n \n 
create_schedule(dt.datetime(2014,12,1), dt.datetime(2014,12,31)).to_csv('sched2.csv', index=False)\n sched2=add_factors(pd.read_csv('sched2.csv'))\n sched2.to_csv('sched2.csv', index=False)\n \n create_schedule(dt.datetime(2015,1,1), dt.datetime(2015,1,31)).to_csv('sched3.csv', index=False)\n sched3=add_factors(pd.read_csv('sched3.csv'))\n sched3.to_csv('sched3.csv', index=False)\n \n create_schedule(dt.datetime(2015,2,1), dt.datetime(2015,2,28)).to_csv('sched4.csv', index=False)\n sched4=add_factors(pd.read_csv('sched4.csv'))\n sched4.to_csv('sched4.csv', index=False)\n \n create_schedule(dt.datetime(2015,3,1), dt.datetime(2015,3,31)).to_csv('sched5.csv', index=False)\n sched5=add_factors(pd.read_csv('sched5.csv'))\n sched5.to_csv('sched5.csv', index=False)\n \n create_schedule(dt.datetime(2015,4,1), dt.datetime(2015,4,15)).to_csv('sched6.csv', index=False)\n sched6=add_factors(pd.read_csv('sched6.csv'))\n sched6.to_csv('sched6.csv', index=False)\n \n schedule=pd.concat([sched1, sched2, sched3, sched4, sched5, sched6])\n \n # Adds in Dunk Data for each team \n schedule_dd=create_dunk_data(schedule)\n \n return schedule_dd\n \n\n# Creates DF of games with all input variables necessary for stats model\n# Requires start and end date (inclusive), number of n games back, and DF of entire schedule\ndef create_train_data(st_dt, end_dt, n, schedule):\n schedule['Date']=pd.to_datetime(schedule['Date'])\n score_set=schedule[schedule['Date']>=st_dt]\n score_set=score_set[score_set['Date']<=end_dt]\n score_set=score_set.reset_index(drop=True)\n data=pd.DataFrame()\n for row in range(len(score_set)):\n data_h=lastNStats(score_set.ix[row, 'Home ID'], score_set.ix[row,'Date'], score_set.ix[row, 'Game_ID'], n,schedule)\n data_a=lastNStats(score_set.ix[row, 'Away ID'], score_set.ix[row,'Date'], score_set.ix[row, 'Game_ID'], n,schedule)\n data=data.append(pd.merge(data_h, data_a, how='inner', on='Game_ID', suffixes=('_h', '_a')))\n data=data.reset_index(drop=True)\n data.ix[row, 'Win']=score_set.ix[row, 'Home Team Win']\n return data \n \n\n\n# Code to test the methods made above\nif __name__ == '__main__':\n \n #code to make dataset of 5 and 15 last games in dataset\n season_start=dt.datetime(2014,10,28) #start date for 2014-2015 season\n season_end=dt.datetime(2015,4,15) #end date for 2014-2015 season\n \n #schedule=create_2014_schedule()\n# data5=create_train_data(dt.datetime(2014, 11,15), dt.datetime(2015, 2,12), 5, schedule)\n# data15=create_train_data(dt.datetime(2014, 11,15), dt.datetime(2015, 2,12), 15, schedule)\n# \n# data515=pd.merge(data5, data15, how='inner', on='Game_ID', suffixes=('_5', '_15'))\n# data515.to_csv('data515.csv', index=False)\n schedule=pd.read_csv('schedule.csv')\n #data5=create_train_data(dt.datetime(2014,11,15), dt.datetime(2015,4,10), 5, schedule)\n #data15=create_train_data(dt.datetime(2014,11,15), dt.datetime(2015,4,10), 15, schedule)\n data_all=create_train_data(dt.datetime(2014,11,15), dt.datetime(2015,4,10), 82, schedule)\n #data=pd.merge(data5, data15, how='inner', on='Game_ID', suffixes=('_5', '_15'))\n data=pd.merge(data, data_all, how='inner', on='Game_ID', suffixes=('', '_all'))\n data.to_csv('data.csv', index=False)\n \n# data5t=create_train_data(dt.datetime(2015, 3,15), dt.datetime(2015, 4,10), 5, schedule)\n# data15t=create_train_data(dt.datetime(2015, 3,15), dt.datetime(2015, 4,10), 15, schedule)\n# \n# data515test=pd.merge(data5t, data15t, how='inner', on='Game_ID', suffixes=('_5', '_15'))\n# data515test.to_csv('data515test.csv', index=False)\n# \n #some of my 
games have a suspiciously low number of x previous games. see why that is\n# \n# #signals code is done\n# from os import system\n# system(\"say Code is done\")\n \n \n# dunk_data=pd.read_csv('dunk_data.csv')\n# dunk_data=dunk_data[['Game_ID', 'h_dunk_score', 'a_dunk_score', 'h_dunk_win']] \n# new_sched=pd.merge(schedule, dunk_data, how='inner', on='Game_ID')\n# new_sched.to_csv('schedule.csv', index=False)\n# \n" ]
[ [ "pandas.concat", "pandas.to_datetime", "pandas.read_csv", "pandas.merge", "pandas.DataFrame", "numpy.mean", "pandas.date_range" ] ]
sjtututu/QUANTAXIS
[ "e9e20cdeda8b8d132433037b639a7e60f286a190" ]
[ "QUANTAXIS/QAUtil/QADate_trade.py" ]
[ "# coding:utf-8\r\n#\r\n# The MIT License (MIT)\r\n#\r\n# Copyright (c) 2016-2019 yutiansut/QUANTAXIS\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n\r\nimport datetime\r\nimport pandas as pd\r\n\r\nfrom QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE\r\n\r\n# todo 🛠 只记录非交易日,其余的用程序迭代 生成交易日\r\n\r\ntrade_date_sse = [\r\n '1990-12-19',\r\n '1990-12-20',\r\n '1990-12-21',\r\n '1990-12-24',\r\n '1990-12-25',\r\n '1990-12-26',\r\n '1990-12-27',\r\n '1990-12-28',\r\n '1990-12-31',\r\n '1991-01-02',\r\n '1991-01-03',\r\n '1991-01-04',\r\n '1991-01-07',\r\n '1991-01-08',\r\n '1991-01-09',\r\n '1991-01-10',\r\n '1991-01-11',\r\n '1991-01-14',\r\n '1991-01-15',\r\n '1991-01-16',\r\n '1991-01-17',\r\n '1991-01-18',\r\n '1991-01-21',\r\n '1991-01-22',\r\n '1991-01-23',\r\n '1991-01-24',\r\n '1991-01-25',\r\n '1991-01-28',\r\n '1991-01-29',\r\n '1991-01-30',\r\n '1991-01-31',\r\n '1991-02-01',\r\n '1991-02-04',\r\n '1991-02-05',\r\n '1991-02-06',\r\n '1991-02-07',\r\n '1991-02-08',\r\n '1991-02-11',\r\n '1991-02-12',\r\n '1991-02-13',\r\n '1991-02-14',\r\n '1991-02-19',\r\n '1991-02-20',\r\n '1991-02-21',\r\n '1991-02-22',\r\n '1991-02-25',\r\n '1991-02-26',\r\n '1991-02-27',\r\n '1991-02-28',\r\n '1991-03-01',\r\n '1991-03-04',\r\n '1991-03-05',\r\n '1991-03-06',\r\n '1991-03-07',\r\n '1991-03-08',\r\n '1991-03-11',\r\n '1991-03-12',\r\n '1991-03-13',\r\n '1991-03-14',\r\n '1991-03-15',\r\n '1991-03-18',\r\n '1991-03-19',\r\n '1991-03-20',\r\n '1991-03-21',\r\n '1991-03-22',\r\n '1991-03-25',\r\n '1991-03-26',\r\n '1991-03-27',\r\n '1991-03-28',\r\n '1991-03-29',\r\n '1991-04-01',\r\n '1991-04-02',\r\n '1991-04-03',\r\n '1991-04-04',\r\n '1991-04-05',\r\n '1991-04-08',\r\n '1991-04-09',\r\n '1991-04-10',\r\n '1991-04-11',\r\n '1991-04-12',\r\n '1991-04-15',\r\n '1991-04-16',\r\n '1991-04-17',\r\n '1991-04-18',\r\n '1991-04-19',\r\n '1991-04-22',\r\n '1991-04-23',\r\n '1991-04-24',\r\n '1991-04-25',\r\n '1991-04-26',\r\n '1991-04-29',\r\n '1991-04-30',\r\n '1991-05-02',\r\n '1991-05-03',\r\n '1991-05-06',\r\n '1991-05-07',\r\n '1991-05-08',\r\n '1991-05-09',\r\n '1991-05-10',\r\n '1991-05-13',\r\n '1991-05-14',\r\n '1991-05-15',\r\n '1991-05-16',\r\n '1991-05-17',\r\n '1991-05-20',\r\n '1991-05-21',\r\n '1991-05-22',\r\n '1991-05-23',\r\n '1991-05-24',\r\n '1991-05-27',\r\n '1991-05-28',\r\n '1991-05-29',\r\n '1991-05-30',\r\n '1991-05-31',\r\n '1991-06-03',\r\n '1991-06-04',\r\n '1991-06-05',\r\n '1991-06-06',\r\n '1991-06-07',\r\n '1991-06-10',\r\n 
'1991-06-11',\r\n '1991-06-12',\r\n '1991-06-13',\r\n '1991-06-14',\r\n '1991-06-17',\r\n '1991-06-18',\r\n '1991-06-19',\r\n '1991-06-20',\r\n '1991-06-21',\r\n '1991-06-24',\r\n '1991-06-25',\r\n '1991-06-26',\r\n '1991-06-27',\r\n '1991-06-28',\r\n '1991-07-01',\r\n '1991-07-02',\r\n '1991-07-03',\r\n '1991-07-04',\r\n '1991-07-05',\r\n '1991-07-08',\r\n '1991-07-09',\r\n '1991-07-10',\r\n '1991-07-11',\r\n '1991-07-12',\r\n '1991-07-15',\r\n '1991-07-16',\r\n '1991-07-17',\r\n '1991-07-18',\r\n '1991-07-19',\r\n '1991-07-22',\r\n '1991-07-23',\r\n '1991-07-24',\r\n '1991-07-25',\r\n '1991-07-26',\r\n '1991-07-29',\r\n '1991-07-30',\r\n '1991-07-31',\r\n '1991-08-01',\r\n '1991-08-02',\r\n '1991-08-05',\r\n '1991-08-06',\r\n '1991-08-07',\r\n '1991-08-08',\r\n '1991-08-09',\r\n '1991-08-12',\r\n '1991-08-13',\r\n '1991-08-14',\r\n '1991-08-15',\r\n '1991-08-16',\r\n '1991-08-19',\r\n '1991-08-20',\r\n '1991-08-21',\r\n '1991-08-22',\r\n '1991-08-23',\r\n '1991-08-26',\r\n '1991-08-27',\r\n '1991-08-28',\r\n '1991-08-29',\r\n '1991-08-30',\r\n '1991-09-02',\r\n '1991-09-03',\r\n '1991-09-04',\r\n '1991-09-05',\r\n '1991-09-06',\r\n '1991-09-09',\r\n '1991-09-10',\r\n '1991-09-11',\r\n '1991-09-12',\r\n '1991-09-13',\r\n '1991-09-16',\r\n '1991-09-17',\r\n '1991-09-18',\r\n '1991-09-19',\r\n '1991-09-20',\r\n '1991-09-23',\r\n '1991-09-24',\r\n '1991-09-25',\r\n '1991-09-26',\r\n '1991-09-27',\r\n '1991-09-30',\r\n '1991-10-03',\r\n '1991-10-04',\r\n '1991-10-07',\r\n '1991-10-08',\r\n '1991-10-09',\r\n '1991-10-10',\r\n '1991-10-11',\r\n '1991-10-14',\r\n '1991-10-15',\r\n '1991-10-16',\r\n '1991-10-17',\r\n '1991-10-18',\r\n '1991-10-21',\r\n '1991-10-22',\r\n '1991-10-23',\r\n '1991-10-24',\r\n '1991-10-25',\r\n '1991-10-28',\r\n '1991-10-29',\r\n '1991-10-30',\r\n '1991-10-31',\r\n '1991-11-01',\r\n '1991-11-04',\r\n '1991-11-05',\r\n '1991-11-06',\r\n '1991-11-07',\r\n '1991-11-08',\r\n '1991-11-11',\r\n '1991-11-12',\r\n '1991-11-13',\r\n '1991-11-14',\r\n '1991-11-15',\r\n '1991-11-18',\r\n '1991-11-19',\r\n '1991-11-20',\r\n '1991-11-21',\r\n '1991-11-22',\r\n '1991-11-25',\r\n '1991-11-26',\r\n '1991-11-27',\r\n '1991-11-28',\r\n '1991-11-29',\r\n '1991-12-02',\r\n '1991-12-03',\r\n '1991-12-04',\r\n '1991-12-05',\r\n '1991-12-06',\r\n '1991-12-09',\r\n '1991-12-10',\r\n '1991-12-11',\r\n '1991-12-12',\r\n '1991-12-13',\r\n '1991-12-16',\r\n '1991-12-17',\r\n '1991-12-18',\r\n '1991-12-19',\r\n '1991-12-20',\r\n '1991-12-23',\r\n '1991-12-24',\r\n '1991-12-25',\r\n '1991-12-26',\r\n '1991-12-27',\r\n '1991-12-30',\r\n '1991-12-31',\r\n '1992-01-02',\r\n '1992-01-03',\r\n '1992-01-06',\r\n '1992-01-07',\r\n '1992-01-08',\r\n '1992-01-09',\r\n '1992-01-10',\r\n '1992-01-13',\r\n '1992-01-14',\r\n '1992-01-15',\r\n '1992-01-16',\r\n '1992-01-17',\r\n '1992-01-20',\r\n '1992-01-21',\r\n '1992-01-22',\r\n '1992-01-23',\r\n '1992-01-24',\r\n '1992-01-27',\r\n '1992-01-28',\r\n '1992-01-29',\r\n '1992-01-30',\r\n '1992-01-31',\r\n '1992-02-03',\r\n '1992-02-07',\r\n '1992-02-10',\r\n '1992-02-11',\r\n '1992-02-12',\r\n '1992-02-13',\r\n '1992-02-14',\r\n '1992-02-17',\r\n '1992-02-18',\r\n '1992-02-19',\r\n '1992-02-20',\r\n '1992-02-21',\r\n '1992-02-24',\r\n '1992-02-25',\r\n '1992-02-26',\r\n '1992-02-27',\r\n '1992-02-28',\r\n '1992-03-02',\r\n '1992-03-03',\r\n '1992-03-04',\r\n '1992-03-05',\r\n '1992-03-06',\r\n '1992-03-09',\r\n '1992-03-10',\r\n '1992-03-11',\r\n '1992-03-12',\r\n '1992-03-13',\r\n '1992-03-16',\r\n '1992-03-17',\r\n '1992-03-18',\r\n '1992-03-19',\r\n 
'1992-03-20',\r\n '1992-03-23',\r\n '1992-03-24',\r\n '1992-03-25',\r\n '1992-03-26',\r\n '1992-03-27',\r\n '1992-03-30',\r\n '1992-03-31',\r\n '1992-04-01',\r\n '1992-04-02',\r\n '1992-04-03',\r\n '1992-04-06',\r\n '1992-04-07',\r\n '1992-04-08',\r\n '1992-04-09',\r\n '1992-04-10',\r\n '1992-04-13',\r\n '1992-04-14',\r\n '1992-04-15',\r\n '1992-04-16',\r\n '1992-04-17',\r\n '1992-04-20',\r\n '1992-04-21',\r\n '1992-04-22',\r\n '1992-04-23',\r\n '1992-04-24',\r\n '1992-04-27',\r\n '1992-04-28',\r\n '1992-04-29',\r\n '1992-04-30',\r\n '1992-05-04',\r\n '1992-05-05',\r\n '1992-05-06',\r\n '1992-05-07',\r\n '1992-05-08',\r\n '1992-05-11',\r\n '1992-05-12',\r\n '1992-05-13',\r\n '1992-05-14',\r\n '1992-05-15',\r\n '1992-05-18',\r\n '1992-05-19',\r\n '1992-05-20',\r\n '1992-05-21',\r\n '1992-05-22',\r\n '1992-05-25',\r\n '1992-05-26',\r\n '1992-05-27',\r\n '1992-05-28',\r\n '1992-05-29',\r\n '1992-06-01',\r\n '1992-06-02',\r\n '1992-06-03',\r\n '1992-06-04',\r\n '1992-06-05',\r\n '1992-06-08',\r\n '1992-06-09',\r\n '1992-06-10',\r\n '1992-06-11',\r\n '1992-06-12',\r\n '1992-06-15',\r\n '1992-06-16',\r\n '1992-06-17',\r\n '1992-06-18',\r\n '1992-06-19',\r\n '1992-06-22',\r\n '1992-06-23',\r\n '1992-06-24',\r\n '1992-06-25',\r\n '1992-06-26',\r\n '1992-06-29',\r\n '1992-06-30',\r\n '1992-07-01',\r\n '1992-07-02',\r\n '1992-07-03',\r\n '1992-07-06',\r\n '1992-07-07',\r\n '1992-07-08',\r\n '1992-07-09',\r\n '1992-07-10',\r\n '1992-07-13',\r\n '1992-07-14',\r\n '1992-07-15',\r\n '1992-07-16',\r\n '1992-07-17',\r\n '1992-07-20',\r\n '1992-07-21',\r\n '1992-07-22',\r\n '1992-07-23',\r\n '1992-07-24',\r\n '1992-07-27',\r\n '1992-07-28',\r\n '1992-07-29',\r\n '1992-07-30',\r\n '1992-07-31',\r\n '1992-08-03',\r\n '1992-08-04',\r\n '1992-08-05',\r\n '1992-08-06',\r\n '1992-08-07',\r\n '1992-08-10',\r\n '1992-08-11',\r\n '1992-08-12',\r\n '1992-08-13',\r\n '1992-08-14',\r\n '1992-08-17',\r\n '1992-08-18',\r\n '1992-08-19',\r\n '1992-08-20',\r\n '1992-08-21',\r\n '1992-08-24',\r\n '1992-08-25',\r\n '1992-08-26',\r\n '1992-08-27',\r\n '1992-08-28',\r\n '1992-08-31',\r\n '1992-09-01',\r\n '1992-09-02',\r\n '1992-09-03',\r\n '1992-09-04',\r\n '1992-09-07',\r\n '1992-09-08',\r\n '1992-09-09',\r\n '1992-09-10',\r\n '1992-09-11',\r\n '1992-09-14',\r\n '1992-09-15',\r\n '1992-09-16',\r\n '1992-09-17',\r\n '1992-09-18',\r\n '1992-09-21',\r\n '1992-09-22',\r\n '1992-09-23',\r\n '1992-09-24',\r\n '1992-09-25',\r\n '1992-09-28',\r\n '1992-09-29',\r\n '1992-09-30',\r\n '1992-10-05',\r\n '1992-10-06',\r\n '1992-10-07',\r\n '1992-10-08',\r\n '1992-10-09',\r\n '1992-10-12',\r\n '1992-10-13',\r\n '1992-10-14',\r\n '1992-10-15',\r\n '1992-10-16',\r\n '1992-10-19',\r\n '1992-10-20',\r\n '1992-10-21',\r\n '1992-10-22',\r\n '1992-10-23',\r\n '1992-10-26',\r\n '1992-10-27',\r\n '1992-10-28',\r\n '1992-10-29',\r\n '1992-10-30',\r\n '1992-11-02',\r\n '1992-11-03',\r\n '1992-11-04',\r\n '1992-11-05',\r\n '1992-11-06',\r\n '1992-11-09',\r\n '1992-11-10',\r\n '1992-11-11',\r\n '1992-11-12',\r\n '1992-11-13',\r\n '1992-11-16',\r\n '1992-11-17',\r\n '1992-11-18',\r\n '1992-11-19',\r\n '1992-11-20',\r\n '1992-11-23',\r\n '1992-11-24',\r\n '1992-11-25',\r\n '1992-11-26',\r\n '1992-11-27',\r\n '1992-11-30',\r\n '1992-12-01',\r\n '1992-12-02',\r\n '1992-12-03',\r\n '1992-12-04',\r\n '1992-12-07',\r\n '1992-12-08',\r\n '1992-12-09',\r\n '1992-12-10',\r\n '1992-12-11',\r\n '1992-12-14',\r\n '1992-12-15',\r\n '1992-12-16',\r\n '1992-12-17',\r\n '1992-12-18',\r\n '1992-12-21',\r\n '1992-12-22',\r\n '1992-12-23',\r\n '1992-12-24',\r\n 
'1992-12-25',\r\n '1992-12-28',\r\n '1992-12-29',\r\n '1992-12-30',\r\n '1992-12-31',\r\n '1993-01-04',\r\n '1993-01-05',\r\n '1993-01-06',\r\n '1993-01-07',\r\n '1993-01-08',\r\n '1993-01-11',\r\n '1993-01-12',\r\n '1993-01-13',\r\n '1993-01-14',\r\n '1993-01-15',\r\n '1993-01-18',\r\n '1993-01-19',\r\n '1993-01-20',\r\n '1993-01-21',\r\n '1993-01-22',\r\n '1993-01-27',\r\n '1993-01-28',\r\n '1993-01-29',\r\n '1993-02-01',\r\n '1993-02-02',\r\n '1993-02-03',\r\n '1993-02-04',\r\n '1993-02-05',\r\n '1993-02-08',\r\n '1993-02-09',\r\n '1993-02-10',\r\n '1993-02-11',\r\n '1993-02-12',\r\n '1993-02-15',\r\n '1993-02-16',\r\n '1993-02-17',\r\n '1993-02-18',\r\n '1993-02-19',\r\n '1993-02-22',\r\n '1993-02-23',\r\n '1993-02-24',\r\n '1993-02-25',\r\n '1993-02-26',\r\n '1993-03-01',\r\n '1993-03-02',\r\n '1993-03-03',\r\n '1993-03-04',\r\n '1993-03-05',\r\n '1993-03-08',\r\n '1993-03-09',\r\n '1993-03-10',\r\n '1993-03-11',\r\n '1993-03-12',\r\n '1993-03-15',\r\n '1993-03-16',\r\n '1993-03-17',\r\n '1993-03-18',\r\n '1993-03-19',\r\n '1993-03-22',\r\n '1993-03-23',\r\n '1993-03-24',\r\n '1993-03-25',\r\n '1993-03-26',\r\n '1993-03-29',\r\n '1993-03-30',\r\n '1993-03-31',\r\n '1993-04-01',\r\n '1993-04-02',\r\n '1993-04-05',\r\n '1993-04-06',\r\n '1993-04-07',\r\n '1993-04-08',\r\n '1993-04-09',\r\n '1993-04-12',\r\n '1993-04-13',\r\n '1993-04-14',\r\n '1993-04-15',\r\n '1993-04-16',\r\n '1993-04-19',\r\n '1993-04-20',\r\n '1993-04-21',\r\n '1993-04-22',\r\n '1993-04-23',\r\n '1993-04-26',\r\n '1993-04-27',\r\n '1993-04-28',\r\n '1993-04-29',\r\n '1993-04-30',\r\n '1993-05-03',\r\n '1993-05-04',\r\n '1993-05-05',\r\n '1993-05-06',\r\n '1993-05-07',\r\n '1993-05-10',\r\n '1993-05-11',\r\n '1993-05-12',\r\n '1993-05-13',\r\n '1993-05-14',\r\n '1993-05-17',\r\n '1993-05-18',\r\n '1993-05-19',\r\n '1993-05-20',\r\n '1993-05-21',\r\n '1993-05-24',\r\n '1993-05-25',\r\n '1993-05-26',\r\n '1993-05-27',\r\n '1993-05-28',\r\n '1993-05-31',\r\n '1993-06-01',\r\n '1993-06-02',\r\n '1993-06-03',\r\n '1993-06-04',\r\n '1993-06-07',\r\n '1993-06-08',\r\n '1993-06-09',\r\n '1993-06-10',\r\n '1993-06-11',\r\n '1993-06-14',\r\n '1993-06-15',\r\n '1993-06-16',\r\n '1993-06-17',\r\n '1993-06-18',\r\n '1993-06-21',\r\n '1993-06-22',\r\n '1993-06-23',\r\n '1993-06-24',\r\n '1993-06-25',\r\n '1993-06-28',\r\n '1993-06-29',\r\n '1993-06-30',\r\n '1993-07-01',\r\n '1993-07-02',\r\n '1993-07-05',\r\n '1993-07-06',\r\n '1993-07-07',\r\n '1993-07-08',\r\n '1993-07-09',\r\n '1993-07-12',\r\n '1993-07-13',\r\n '1993-07-14',\r\n '1993-07-15',\r\n '1993-07-16',\r\n '1993-07-19',\r\n '1993-07-20',\r\n '1993-07-21',\r\n '1993-07-22',\r\n '1993-07-23',\r\n '1993-07-26',\r\n '1993-07-27',\r\n '1993-07-28',\r\n '1993-07-29',\r\n '1993-07-30',\r\n '1993-08-02',\r\n '1993-08-03',\r\n '1993-08-04',\r\n '1993-08-05',\r\n '1993-08-06',\r\n '1993-08-09',\r\n '1993-08-10',\r\n '1993-08-11',\r\n '1993-08-12',\r\n '1993-08-13',\r\n '1993-08-16',\r\n '1993-08-17',\r\n '1993-08-18',\r\n '1993-08-19',\r\n '1993-08-20',\r\n '1993-08-23',\r\n '1993-08-24',\r\n '1993-08-25',\r\n '1993-08-26',\r\n '1993-08-27',\r\n '1993-08-30',\r\n '1993-08-31',\r\n '1993-09-01',\r\n '1993-09-02',\r\n '1993-09-03',\r\n '1993-09-06',\r\n '1993-09-07',\r\n '1993-09-08',\r\n '1993-09-09',\r\n '1993-09-10',\r\n '1993-09-13',\r\n '1993-09-14',\r\n '1993-09-15',\r\n '1993-09-16',\r\n '1993-09-17',\r\n '1993-09-20',\r\n '1993-09-21',\r\n '1993-09-22',\r\n '1993-09-23',\r\n '1993-09-24',\r\n '1993-09-27',\r\n '1993-09-28',\r\n '1993-09-29',\r\n '1993-09-30',\r\n 
'1993-10-04',\r\n '1993-10-05',\r\n '1993-10-06',\r\n '1993-10-07',\r\n '1993-10-08',\r\n '1993-10-11',\r\n '1993-10-12',\r\n '1993-10-13',\r\n '1993-10-14',\r\n '1993-10-15',\r\n '1993-10-18',\r\n '1993-10-19',\r\n '1993-10-20',\r\n '1993-10-21',\r\n '1993-10-22',\r\n '1993-10-25',\r\n '1993-10-26',\r\n '1993-10-27',\r\n '1993-10-28',\r\n '1993-10-29',\r\n '1993-11-01',\r\n '1993-11-02',\r\n '1993-11-03',\r\n '1993-11-04',\r\n '1993-11-05',\r\n '1993-11-08',\r\n '1993-11-09',\r\n '1993-11-10',\r\n '1993-11-11',\r\n '1993-11-12',\r\n '1993-11-15',\r\n '1993-11-16',\r\n '1993-11-17',\r\n '1993-11-18',\r\n '1993-11-19',\r\n '1993-11-22',\r\n '1993-11-23',\r\n '1993-11-24',\r\n '1993-11-25',\r\n '1993-11-26',\r\n '1993-11-29',\r\n '1993-11-30',\r\n '1993-12-01',\r\n '1993-12-02',\r\n '1993-12-03',\r\n '1993-12-06',\r\n '1993-12-07',\r\n '1993-12-08',\r\n '1993-12-09',\r\n '1993-12-10',\r\n '1993-12-13',\r\n '1993-12-14',\r\n '1993-12-15',\r\n '1993-12-16',\r\n '1993-12-17',\r\n '1993-12-20',\r\n '1993-12-21',\r\n '1993-12-22',\r\n '1993-12-23',\r\n '1993-12-24',\r\n '1993-12-27',\r\n '1993-12-28',\r\n '1993-12-29',\r\n '1993-12-30',\r\n '1993-12-31',\r\n '1994-01-03',\r\n '1994-01-04',\r\n '1994-01-05',\r\n '1994-01-06',\r\n '1994-01-07',\r\n '1994-01-10',\r\n '1994-01-11',\r\n '1994-01-12',\r\n '1994-01-13',\r\n '1994-01-14',\r\n '1994-01-17',\r\n '1994-01-18',\r\n '1994-01-19',\r\n '1994-01-20',\r\n '1994-01-21',\r\n '1994-01-24',\r\n '1994-01-25',\r\n '1994-01-26',\r\n '1994-01-27',\r\n '1994-01-28',\r\n '1994-01-31',\r\n '1994-02-01',\r\n '1994-02-02',\r\n '1994-02-03',\r\n '1994-02-04',\r\n '1994-02-14',\r\n '1994-02-15',\r\n '1994-02-16',\r\n '1994-02-17',\r\n '1994-02-18',\r\n '1994-02-21',\r\n '1994-02-22',\r\n '1994-02-23',\r\n '1994-02-24',\r\n '1994-02-25',\r\n '1994-02-28',\r\n '1994-03-01',\r\n '1994-03-02',\r\n '1994-03-03',\r\n '1994-03-04',\r\n '1994-03-07',\r\n '1994-03-08',\r\n '1994-03-09',\r\n '1994-03-10',\r\n '1994-03-11',\r\n '1994-03-14',\r\n '1994-03-15',\r\n '1994-03-16',\r\n '1994-03-17',\r\n '1994-03-18',\r\n '1994-03-21',\r\n '1994-03-22',\r\n '1994-03-23',\r\n '1994-03-24',\r\n '1994-03-25',\r\n '1994-03-28',\r\n '1994-03-29',\r\n '1994-03-30',\r\n '1994-03-31',\r\n '1994-04-01',\r\n '1994-04-04',\r\n '1994-04-05',\r\n '1994-04-06',\r\n '1994-04-07',\r\n '1994-04-08',\r\n '1994-04-11',\r\n '1994-04-12',\r\n '1994-04-13',\r\n '1994-04-14',\r\n '1994-04-15',\r\n '1994-04-18',\r\n '1994-04-19',\r\n '1994-04-20',\r\n '1994-04-21',\r\n '1994-04-22',\r\n '1994-04-25',\r\n '1994-04-26',\r\n '1994-04-27',\r\n '1994-04-28',\r\n '1994-04-29',\r\n '1994-05-03',\r\n '1994-05-04',\r\n '1994-05-05',\r\n '1994-05-06',\r\n '1994-05-09',\r\n '1994-05-10',\r\n '1994-05-11',\r\n '1994-05-12',\r\n '1994-05-13',\r\n '1994-05-16',\r\n '1994-05-17',\r\n '1994-05-18',\r\n '1994-05-19',\r\n '1994-05-20',\r\n '1994-05-23',\r\n '1994-05-24',\r\n '1994-05-25',\r\n '1994-05-26',\r\n '1994-05-27',\r\n '1994-05-30',\r\n '1994-05-31',\r\n '1994-06-01',\r\n '1994-06-02',\r\n '1994-06-03',\r\n '1994-06-06',\r\n '1994-06-07',\r\n '1994-06-08',\r\n '1994-06-09',\r\n '1994-06-10',\r\n '1994-06-13',\r\n '1994-06-14',\r\n '1994-06-15',\r\n '1994-06-16',\r\n '1994-06-17',\r\n '1994-06-20',\r\n '1994-06-21',\r\n '1994-06-22',\r\n '1994-06-23',\r\n '1994-06-24',\r\n '1994-06-27',\r\n '1994-06-28',\r\n '1994-06-29',\r\n '1994-06-30',\r\n '1994-07-01',\r\n '1994-07-04',\r\n '1994-07-05',\r\n '1994-07-06',\r\n '1994-07-07',\r\n '1994-07-08',\r\n '1994-07-11',\r\n '1994-07-12',\r\n '1994-07-13',\r\n 
'1994-07-14',\r\n '1994-07-15',\r\n '1994-07-18',\r\n '1994-07-19',\r\n '1994-07-20',\r\n '1994-07-21',\r\n '1994-07-22',\r\n '1994-07-25',\r\n '1994-07-26',\r\n '1994-07-27',\r\n '1994-07-28',\r\n '1994-07-29',\r\n '1994-08-01',\r\n '1994-08-02',\r\n '1994-08-03',\r\n '1994-08-04',\r\n '1994-08-05',\r\n '1994-08-08',\r\n '1994-08-09',\r\n '1994-08-10',\r\n '1994-08-11',\r\n '1994-08-12',\r\n '1994-08-15',\r\n '1994-08-16',\r\n '1994-08-17',\r\n '1994-08-18',\r\n '1994-08-19',\r\n '1994-08-22',\r\n '1994-08-23',\r\n '1994-08-24',\r\n '1994-08-25',\r\n '1994-08-26',\r\n '1994-08-29',\r\n '1994-08-30',\r\n '1994-08-31',\r\n '1994-09-01',\r\n '1994-09-02',\r\n '1994-09-05',\r\n '1994-09-06',\r\n '1994-09-07',\r\n '1994-09-08',\r\n '1994-09-09',\r\n '1994-09-12',\r\n '1994-09-13',\r\n '1994-09-14',\r\n '1994-09-15',\r\n '1994-09-16',\r\n '1994-09-19',\r\n '1994-09-20',\r\n '1994-09-21',\r\n '1994-09-22',\r\n '1994-09-23',\r\n '1994-09-26',\r\n '1994-09-27',\r\n '1994-09-28',\r\n '1994-09-29',\r\n '1994-09-30',\r\n '1994-10-05',\r\n '1994-10-06',\r\n '1994-10-07',\r\n '1994-10-10',\r\n '1994-10-11',\r\n '1994-10-12',\r\n '1994-10-13',\r\n '1994-10-14',\r\n '1994-10-17',\r\n '1994-10-18',\r\n '1994-10-19',\r\n '1994-10-20',\r\n '1994-10-21',\r\n '1994-10-24',\r\n '1994-10-25',\r\n '1994-10-26',\r\n '1994-10-27',\r\n '1994-10-28',\r\n '1994-10-31',\r\n '1994-11-01',\r\n '1994-11-02',\r\n '1994-11-03',\r\n '1994-11-04',\r\n '1994-11-07',\r\n '1994-11-08',\r\n '1994-11-09',\r\n '1994-11-10',\r\n '1994-11-11',\r\n '1994-11-14',\r\n '1994-11-15',\r\n '1994-11-16',\r\n '1994-11-17',\r\n '1994-11-18',\r\n '1994-11-21',\r\n '1994-11-22',\r\n '1994-11-23',\r\n '1994-11-24',\r\n '1994-11-25',\r\n '1994-11-28',\r\n '1994-11-29',\r\n '1994-11-30',\r\n '1994-12-01',\r\n '1994-12-02',\r\n '1994-12-05',\r\n '1994-12-06',\r\n '1994-12-07',\r\n '1994-12-08',\r\n '1994-12-09',\r\n '1994-12-12',\r\n '1994-12-13',\r\n '1994-12-14',\r\n '1994-12-15',\r\n '1994-12-16',\r\n '1994-12-19',\r\n '1994-12-20',\r\n '1994-12-21',\r\n '1994-12-22',\r\n '1994-12-23',\r\n '1994-12-26',\r\n '1994-12-27',\r\n '1994-12-28',\r\n '1994-12-29',\r\n '1994-12-30',\r\n '1995-01-03',\r\n '1995-01-04',\r\n '1995-01-05',\r\n '1995-01-06',\r\n '1995-01-09',\r\n '1995-01-10',\r\n '1995-01-11',\r\n '1995-01-12',\r\n '1995-01-13',\r\n '1995-01-16',\r\n '1995-01-17',\r\n '1995-01-18',\r\n '1995-01-19',\r\n '1995-01-20',\r\n '1995-01-23',\r\n '1995-01-24',\r\n '1995-01-25',\r\n '1995-01-26',\r\n '1995-01-27',\r\n '1995-02-06',\r\n '1995-02-07',\r\n '1995-02-08',\r\n '1995-02-09',\r\n '1995-02-10',\r\n '1995-02-13',\r\n '1995-02-14',\r\n '1995-02-15',\r\n '1995-02-16',\r\n '1995-02-17',\r\n '1995-02-20',\r\n '1995-02-21',\r\n '1995-02-22',\r\n '1995-02-23',\r\n '1995-02-24',\r\n '1995-02-27',\r\n '1995-02-28',\r\n '1995-03-01',\r\n '1995-03-02',\r\n '1995-03-03',\r\n '1995-03-06',\r\n '1995-03-07',\r\n '1995-03-08',\r\n '1995-03-09',\r\n '1995-03-10',\r\n '1995-03-13',\r\n '1995-03-14',\r\n '1995-03-15',\r\n '1995-03-16',\r\n '1995-03-17',\r\n '1995-03-20',\r\n '1995-03-21',\r\n '1995-03-22',\r\n '1995-03-23',\r\n '1995-03-24',\r\n '1995-03-27',\r\n '1995-03-28',\r\n '1995-03-29',\r\n '1995-03-30',\r\n '1995-03-31',\r\n '1995-04-03',\r\n '1995-04-04',\r\n '1995-04-05',\r\n '1995-04-06',\r\n '1995-04-07',\r\n '1995-04-10',\r\n '1995-04-11',\r\n '1995-04-12',\r\n '1995-04-13',\r\n '1995-04-14',\r\n '1995-04-17',\r\n '1995-04-18',\r\n '1995-04-19',\r\n '1995-04-20',\r\n '1995-04-21',\r\n '1995-04-24',\r\n '1995-04-25',\r\n '1995-04-26',\r\n 
'1995-04-27',\r\n '1995-04-28',\r\n '1995-05-02',\r\n '1995-05-03',\r\n '1995-05-04',\r\n '1995-05-05',\r\n '1995-05-08',\r\n '1995-05-09',\r\n '1995-05-10',\r\n '1995-05-11',\r\n '1995-05-12',\r\n '1995-05-15',\r\n '1995-05-16',\r\n '1995-05-17',\r\n '1995-05-18',\r\n '1995-05-19',\r\n '1995-05-22',\r\n '1995-05-23',\r\n '1995-05-24',\r\n '1995-05-25',\r\n '1995-05-26',\r\n '1995-05-29',\r\n '1995-05-30',\r\n '1995-05-31',\r\n '1995-06-01',\r\n '1995-06-02',\r\n '1995-06-05',\r\n '1995-06-06',\r\n '1995-06-07',\r\n '1995-06-08',\r\n '1995-06-09',\r\n '1995-06-12',\r\n '1995-06-13',\r\n '1995-06-14',\r\n '1995-06-15',\r\n '1995-06-16',\r\n '1995-06-19',\r\n '1995-06-20',\r\n '1995-06-21',\r\n '1995-06-22',\r\n '1995-06-23',\r\n '1995-06-26',\r\n '1995-06-27',\r\n '1995-06-28',\r\n '1995-06-29',\r\n '1995-06-30',\r\n '1995-07-03',\r\n '1995-07-04',\r\n '1995-07-05',\r\n '1995-07-06',\r\n '1995-07-07',\r\n '1995-07-10',\r\n '1995-07-11',\r\n '1995-07-12',\r\n '1995-07-13',\r\n '1995-07-14',\r\n '1995-07-17',\r\n '1995-07-18',\r\n '1995-07-19',\r\n '1995-07-20',\r\n '1995-07-21',\r\n '1995-07-24',\r\n '1995-07-25',\r\n '1995-07-26',\r\n '1995-07-27',\r\n '1995-07-28',\r\n '1995-07-31',\r\n '1995-08-01',\r\n '1995-08-02',\r\n '1995-08-03',\r\n '1995-08-04',\r\n '1995-08-07',\r\n '1995-08-08',\r\n '1995-08-09',\r\n '1995-08-10',\r\n '1995-08-11',\r\n '1995-08-14',\r\n '1995-08-15',\r\n '1995-08-16',\r\n '1995-08-17',\r\n '1995-08-18',\r\n '1995-08-21',\r\n '1995-08-22',\r\n '1995-08-23',\r\n '1995-08-24',\r\n '1995-08-25',\r\n '1995-08-28',\r\n '1995-08-29',\r\n '1995-08-30',\r\n '1995-08-31',\r\n '1995-09-01',\r\n '1995-09-04',\r\n '1995-09-05',\r\n '1995-09-06',\r\n '1995-09-07',\r\n '1995-09-08',\r\n '1995-09-11',\r\n '1995-09-12',\r\n '1995-09-13',\r\n '1995-09-14',\r\n '1995-09-15',\r\n '1995-09-18',\r\n '1995-09-19',\r\n '1995-09-20',\r\n '1995-09-21',\r\n '1995-09-22',\r\n '1995-09-25',\r\n '1995-09-26',\r\n '1995-09-27',\r\n '1995-09-28',\r\n '1995-09-29',\r\n '1995-10-04',\r\n '1995-10-05',\r\n '1995-10-06',\r\n '1995-10-09',\r\n '1995-10-10',\r\n '1995-10-11',\r\n '1995-10-12',\r\n '1995-10-13',\r\n '1995-10-16',\r\n '1995-10-17',\r\n '1995-10-18',\r\n '1995-10-19',\r\n '1995-10-20',\r\n '1995-10-23',\r\n '1995-10-24',\r\n '1995-10-25',\r\n '1995-10-26',\r\n '1995-10-27',\r\n '1995-10-30',\r\n '1995-10-31',\r\n '1995-11-01',\r\n '1995-11-02',\r\n '1995-11-03',\r\n '1995-11-06',\r\n '1995-11-07',\r\n '1995-11-08',\r\n '1995-11-09',\r\n '1995-11-10',\r\n '1995-11-13',\r\n '1995-11-14',\r\n '1995-11-15',\r\n '1995-11-16',\r\n '1995-11-17',\r\n '1995-11-20',\r\n '1995-11-21',\r\n '1995-11-22',\r\n '1995-11-23',\r\n '1995-11-24',\r\n '1995-11-27',\r\n '1995-11-28',\r\n '1995-11-29',\r\n '1995-11-30',\r\n '1995-12-01',\r\n '1995-12-04',\r\n '1995-12-05',\r\n '1995-12-06',\r\n '1995-12-07',\r\n '1995-12-08',\r\n '1995-12-11',\r\n '1995-12-12',\r\n '1995-12-13',\r\n '1995-12-14',\r\n '1995-12-15',\r\n '1995-12-18',\r\n '1995-12-19',\r\n '1995-12-20',\r\n '1995-12-21',\r\n '1995-12-22',\r\n '1995-12-25',\r\n '1995-12-26',\r\n '1995-12-27',\r\n '1995-12-28',\r\n '1995-12-29',\r\n '1996-01-02',\r\n '1996-01-03',\r\n '1996-01-04',\r\n '1996-01-05',\r\n '1996-01-08',\r\n '1996-01-09',\r\n '1996-01-10',\r\n '1996-01-11',\r\n '1996-01-12',\r\n '1996-01-15',\r\n '1996-01-16',\r\n '1996-01-17',\r\n '1996-01-18',\r\n '1996-01-19',\r\n '1996-01-22',\r\n '1996-01-23',\r\n '1996-01-24',\r\n '1996-01-25',\r\n '1996-01-26',\r\n '1996-01-29',\r\n '1996-01-30',\r\n '1996-01-31',\r\n '1996-02-01',\r\n 
'1996-02-02',\r\n '1996-02-05',\r\n '1996-02-06',\r\n '1996-02-07',\r\n '1996-02-08',\r\n '1996-02-09',\r\n '1996-02-12',\r\n '1996-02-13',\r\n '1996-02-14',\r\n '1996-02-15',\r\n '1996-02-16',\r\n '1996-03-04',\r\n '1996-03-05',\r\n '1996-03-06',\r\n '1996-03-07',\r\n '1996-03-08',\r\n '1996-03-11',\r\n '1996-03-12',\r\n '1996-03-13',\r\n '1996-03-14',\r\n '1996-03-15',\r\n '1996-03-18',\r\n '1996-03-19',\r\n '1996-03-20',\r\n '1996-03-21',\r\n '1996-03-22',\r\n '1996-03-25',\r\n '1996-03-26',\r\n '1996-03-27',\r\n '1996-03-28',\r\n '1996-03-29',\r\n '1996-04-01',\r\n '1996-04-02',\r\n '1996-04-03',\r\n '1996-04-04',\r\n '1996-04-05',\r\n '1996-04-08',\r\n '1996-04-09',\r\n '1996-04-10',\r\n '1996-04-11',\r\n '1996-04-12',\r\n '1996-04-15',\r\n '1996-04-16',\r\n '1996-04-17',\r\n '1996-04-18',\r\n '1996-04-19',\r\n '1996-04-22',\r\n '1996-04-23',\r\n '1996-04-24',\r\n '1996-04-25',\r\n '1996-04-26',\r\n '1996-04-29',\r\n '1996-04-30',\r\n '1996-05-02',\r\n '1996-05-03',\r\n '1996-05-06',\r\n '1996-05-07',\r\n '1996-05-08',\r\n '1996-05-09',\r\n '1996-05-10',\r\n '1996-05-13',\r\n '1996-05-14',\r\n '1996-05-15',\r\n '1996-05-16',\r\n '1996-05-17',\r\n '1996-05-20',\r\n '1996-05-21',\r\n '1996-05-22',\r\n '1996-05-23',\r\n '1996-05-24',\r\n '1996-05-27',\r\n '1996-05-28',\r\n '1996-05-29',\r\n '1996-05-30',\r\n '1996-05-31',\r\n '1996-06-03',\r\n '1996-06-04',\r\n '1996-06-05',\r\n '1996-06-06',\r\n '1996-06-07',\r\n '1996-06-10',\r\n '1996-06-11',\r\n '1996-06-12',\r\n '1996-06-13',\r\n '1996-06-14',\r\n '1996-06-17',\r\n '1996-06-18',\r\n '1996-06-19',\r\n '1996-06-20',\r\n '1996-06-21',\r\n '1996-06-24',\r\n '1996-06-25',\r\n '1996-06-26',\r\n '1996-06-27',\r\n '1996-06-28',\r\n '1996-07-01',\r\n '1996-07-02',\r\n '1996-07-03',\r\n '1996-07-04',\r\n '1996-07-05',\r\n '1996-07-08',\r\n '1996-07-09',\r\n '1996-07-10',\r\n '1996-07-11',\r\n '1996-07-12',\r\n '1996-07-15',\r\n '1996-07-16',\r\n '1996-07-17',\r\n '1996-07-18',\r\n '1996-07-19',\r\n '1996-07-22',\r\n '1996-07-23',\r\n '1996-07-24',\r\n '1996-07-25',\r\n '1996-07-26',\r\n '1996-07-29',\r\n '1996-07-30',\r\n '1996-07-31',\r\n '1996-08-01',\r\n '1996-08-02',\r\n '1996-08-05',\r\n '1996-08-06',\r\n '1996-08-07',\r\n '1996-08-08',\r\n '1996-08-09',\r\n '1996-08-12',\r\n '1996-08-13',\r\n '1996-08-14',\r\n '1996-08-15',\r\n '1996-08-16',\r\n '1996-08-19',\r\n '1996-08-20',\r\n '1996-08-21',\r\n '1996-08-22',\r\n '1996-08-23',\r\n '1996-08-26',\r\n '1996-08-27',\r\n '1996-08-28',\r\n '1996-08-29',\r\n '1996-08-30',\r\n '1996-09-02',\r\n '1996-09-03',\r\n '1996-09-04',\r\n '1996-09-05',\r\n '1996-09-06',\r\n '1996-09-09',\r\n '1996-09-10',\r\n '1996-09-11',\r\n '1996-09-12',\r\n '1996-09-13',\r\n '1996-09-16',\r\n '1996-09-17',\r\n '1996-09-18',\r\n '1996-09-19',\r\n '1996-09-20',\r\n '1996-09-23',\r\n '1996-09-24',\r\n '1996-09-25',\r\n '1996-09-26',\r\n '1996-09-27',\r\n '1996-10-03',\r\n '1996-10-04',\r\n '1996-10-07',\r\n '1996-10-08',\r\n '1996-10-09',\r\n '1996-10-10',\r\n '1996-10-11',\r\n '1996-10-14',\r\n '1996-10-15',\r\n '1996-10-16',\r\n '1996-10-17',\r\n '1996-10-18',\r\n '1996-10-21',\r\n '1996-10-22',\r\n '1996-10-23',\r\n '1996-10-24',\r\n '1996-10-25',\r\n '1996-10-28',\r\n '1996-10-29',\r\n '1996-10-30',\r\n '1996-10-31',\r\n '1996-11-01',\r\n '1996-11-04',\r\n '1996-11-05',\r\n '1996-11-06',\r\n '1996-11-07',\r\n '1996-11-08',\r\n '1996-11-11',\r\n '1996-11-12',\r\n '1996-11-13',\r\n '1996-11-14',\r\n '1996-11-15',\r\n '1996-11-18',\r\n '1996-11-19',\r\n '1996-11-20',\r\n '1996-11-21',\r\n '1996-11-22',\r\n 
'1996-11-25',\r\n '1996-11-26',\r\n '1996-11-27',\r\n '1996-11-28',\r\n '1996-11-29',\r\n '1996-12-02',\r\n '1996-12-03',\r\n '1996-12-04',\r\n '1996-12-05',\r\n '1996-12-06',\r\n '1996-12-09',\r\n '1996-12-10',\r\n '1996-12-11',\r\n '1996-12-12',\r\n '1996-12-13',\r\n '1996-12-16',\r\n '1996-12-17',\r\n '1996-12-18',\r\n '1996-12-19',\r\n '1996-12-20',\r\n '1996-12-23',\r\n '1996-12-24',\r\n '1996-12-25',\r\n '1996-12-26',\r\n '1996-12-27',\r\n '1996-12-30',\r\n '1996-12-31',\r\n '1997-01-02',\r\n '1997-01-03',\r\n '1997-01-06',\r\n '1997-01-07',\r\n '1997-01-08',\r\n '1997-01-09',\r\n '1997-01-10',\r\n '1997-01-13',\r\n '1997-01-14',\r\n '1997-01-15',\r\n '1997-01-16',\r\n '1997-01-17',\r\n '1997-01-20',\r\n '1997-01-21',\r\n '1997-01-22',\r\n '1997-01-23',\r\n '1997-01-24',\r\n '1997-01-27',\r\n '1997-01-28',\r\n '1997-01-29',\r\n '1997-01-30',\r\n '1997-01-31',\r\n '1997-02-17',\r\n '1997-02-18',\r\n '1997-02-19',\r\n '1997-02-20',\r\n '1997-02-21',\r\n '1997-02-24',\r\n '1997-02-25',\r\n '1997-02-26',\r\n '1997-02-27',\r\n '1997-02-28',\r\n '1997-03-03',\r\n '1997-03-04',\r\n '1997-03-05',\r\n '1997-03-06',\r\n '1997-03-07',\r\n '1997-03-10',\r\n '1997-03-11',\r\n '1997-03-12',\r\n '1997-03-13',\r\n '1997-03-14',\r\n '1997-03-17',\r\n '1997-03-18',\r\n '1997-03-19',\r\n '1997-03-20',\r\n '1997-03-21',\r\n '1997-03-24',\r\n '1997-03-25',\r\n '1997-03-26',\r\n '1997-03-27',\r\n '1997-03-28',\r\n '1997-03-31',\r\n '1997-04-01',\r\n '1997-04-02',\r\n '1997-04-03',\r\n '1997-04-04',\r\n '1997-04-07',\r\n '1997-04-08',\r\n '1997-04-09',\r\n '1997-04-10',\r\n '1997-04-11',\r\n '1997-04-14',\r\n '1997-04-15',\r\n '1997-04-16',\r\n '1997-04-17',\r\n '1997-04-18',\r\n '1997-04-21',\r\n '1997-04-22',\r\n '1997-04-23',\r\n '1997-04-24',\r\n '1997-04-25',\r\n '1997-04-28',\r\n '1997-04-29',\r\n '1997-04-30',\r\n '1997-05-05',\r\n '1997-05-06',\r\n '1997-05-07',\r\n '1997-05-08',\r\n '1997-05-09',\r\n '1997-05-12',\r\n '1997-05-13',\r\n '1997-05-14',\r\n '1997-05-15',\r\n '1997-05-16',\r\n '1997-05-19',\r\n '1997-05-20',\r\n '1997-05-21',\r\n '1997-05-22',\r\n '1997-05-23',\r\n '1997-05-26',\r\n '1997-05-27',\r\n '1997-05-28',\r\n '1997-05-29',\r\n '1997-05-30',\r\n '1997-06-02',\r\n '1997-06-03',\r\n '1997-06-04',\r\n '1997-06-05',\r\n '1997-06-06',\r\n '1997-06-09',\r\n '1997-06-10',\r\n '1997-06-11',\r\n '1997-06-12',\r\n '1997-06-13',\r\n '1997-06-16',\r\n '1997-06-17',\r\n '1997-06-18',\r\n '1997-06-19',\r\n '1997-06-20',\r\n '1997-06-23',\r\n '1997-06-24',\r\n '1997-06-25',\r\n '1997-06-26',\r\n '1997-06-27',\r\n '1997-07-02',\r\n '1997-07-03',\r\n '1997-07-04',\r\n '1997-07-07',\r\n '1997-07-08',\r\n '1997-07-09',\r\n '1997-07-10',\r\n '1997-07-11',\r\n '1997-07-14',\r\n '1997-07-15',\r\n '1997-07-16',\r\n '1997-07-17',\r\n '1997-07-18',\r\n '1997-07-21',\r\n '1997-07-22',\r\n '1997-07-23',\r\n '1997-07-24',\r\n '1997-07-25',\r\n '1997-07-28',\r\n '1997-07-29',\r\n '1997-07-30',\r\n '1997-07-31',\r\n '1997-08-01',\r\n '1997-08-04',\r\n '1997-08-05',\r\n '1997-08-06',\r\n '1997-08-07',\r\n '1997-08-08',\r\n '1997-08-11',\r\n '1997-08-12',\r\n '1997-08-13',\r\n '1997-08-14',\r\n '1997-08-15',\r\n '1997-08-18',\r\n '1997-08-19',\r\n '1997-08-20',\r\n '1997-08-21',\r\n '1997-08-22',\r\n '1997-08-25',\r\n '1997-08-26',\r\n '1997-08-27',\r\n '1997-08-28',\r\n '1997-08-29',\r\n '1997-09-01',\r\n '1997-09-02',\r\n '1997-09-03',\r\n '1997-09-04',\r\n '1997-09-05',\r\n '1997-09-08',\r\n '1997-09-09',\r\n '1997-09-10',\r\n '1997-09-11',\r\n '1997-09-12',\r\n '1997-09-15',\r\n '1997-09-16',\r\n 
'1997-09-17',\r\n '1997-09-18',\r\n '1997-09-19',\r\n '1997-09-22',\r\n '1997-09-23',\r\n '1997-09-24',\r\n '1997-09-25',\r\n '1997-09-26',\r\n '1997-09-29',\r\n '1997-09-30',\r\n '1997-10-06',\r\n '1997-10-07',\r\n '1997-10-08',\r\n '1997-10-09',\r\n '1997-10-10',\r\n '1997-10-13',\r\n '1997-10-14',\r\n '1997-10-15',\r\n '1997-10-16',\r\n '1997-10-17',\r\n '1997-10-20',\r\n '1997-10-21',\r\n '1997-10-22',\r\n '1997-10-23',\r\n '1997-10-24',\r\n '1997-10-27',\r\n '1997-10-28',\r\n '1997-10-29',\r\n '1997-10-30',\r\n '1997-10-31',\r\n '1997-11-03',\r\n '1997-11-04',\r\n '1997-11-05',\r\n '1997-11-06',\r\n '1997-11-07',\r\n '1997-11-10',\r\n '1997-11-11',\r\n '1997-11-12',\r\n '1997-11-13',\r\n '1997-11-14',\r\n '1997-11-17',\r\n '1997-11-18',\r\n '1997-11-19',\r\n '1997-11-20',\r\n '1997-11-21',\r\n '1997-11-24',\r\n '1997-11-25',\r\n '1997-11-26',\r\n '1997-11-27',\r\n '1997-11-28',\r\n '1997-12-01',\r\n '1997-12-02',\r\n '1997-12-03',\r\n '1997-12-04',\r\n '1997-12-05',\r\n '1997-12-08',\r\n '1997-12-09',\r\n '1997-12-10',\r\n '1997-12-11',\r\n '1997-12-12',\r\n '1997-12-15',\r\n '1997-12-16',\r\n '1997-12-17',\r\n '1997-12-18',\r\n '1997-12-19',\r\n '1997-12-22',\r\n '1997-12-23',\r\n '1997-12-24',\r\n '1997-12-25',\r\n '1997-12-26',\r\n '1997-12-29',\r\n '1997-12-30',\r\n '1997-12-31',\r\n '1998-01-05',\r\n '1998-01-06',\r\n '1998-01-07',\r\n '1998-01-08',\r\n '1998-01-09',\r\n '1998-01-12',\r\n '1998-01-13',\r\n '1998-01-14',\r\n '1998-01-15',\r\n '1998-01-16',\r\n '1998-01-19',\r\n '1998-01-20',\r\n '1998-01-21',\r\n '1998-01-22',\r\n '1998-01-23',\r\n '1998-02-09',\r\n '1998-02-10',\r\n '1998-02-11',\r\n '1998-02-12',\r\n '1998-02-13',\r\n '1998-02-16',\r\n '1998-02-17',\r\n '1998-02-18',\r\n '1998-02-19',\r\n '1998-02-20',\r\n '1998-02-23',\r\n '1998-02-24',\r\n '1998-02-25',\r\n '1998-02-26',\r\n '1998-02-27',\r\n '1998-03-02',\r\n '1998-03-03',\r\n '1998-03-04',\r\n '1998-03-05',\r\n '1998-03-06',\r\n '1998-03-09',\r\n '1998-03-10',\r\n '1998-03-11',\r\n '1998-03-12',\r\n '1998-03-13',\r\n '1998-03-16',\r\n '1998-03-17',\r\n '1998-03-18',\r\n '1998-03-19',\r\n '1998-03-20',\r\n '1998-03-23',\r\n '1998-03-24',\r\n '1998-03-25',\r\n '1998-03-26',\r\n '1998-03-27',\r\n '1998-03-30',\r\n '1998-03-31',\r\n '1998-04-01',\r\n '1998-04-02',\r\n '1998-04-03',\r\n '1998-04-06',\r\n '1998-04-07',\r\n '1998-04-08',\r\n '1998-04-09',\r\n '1998-04-10',\r\n '1998-04-13',\r\n '1998-04-14',\r\n '1998-04-15',\r\n '1998-04-16',\r\n '1998-04-17',\r\n '1998-04-20',\r\n '1998-04-21',\r\n '1998-04-22',\r\n '1998-04-23',\r\n '1998-04-24',\r\n '1998-04-27',\r\n '1998-04-28',\r\n '1998-04-29',\r\n '1998-04-30',\r\n '1998-05-04',\r\n '1998-05-05',\r\n '1998-05-06',\r\n '1998-05-07',\r\n '1998-05-08',\r\n '1998-05-11',\r\n '1998-05-12',\r\n '1998-05-13',\r\n '1998-05-14',\r\n '1998-05-15',\r\n '1998-05-18',\r\n '1998-05-19',\r\n '1998-05-20',\r\n '1998-05-21',\r\n '1998-05-22',\r\n '1998-05-25',\r\n '1998-05-26',\r\n '1998-05-27',\r\n '1998-05-28',\r\n '1998-05-29',\r\n '1998-06-01',\r\n '1998-06-02',\r\n '1998-06-03',\r\n '1998-06-04',\r\n '1998-06-05',\r\n '1998-06-08',\r\n '1998-06-09',\r\n '1998-06-10',\r\n '1998-06-11',\r\n '1998-06-12',\r\n '1998-06-15',\r\n '1998-06-16',\r\n '1998-06-17',\r\n '1998-06-18',\r\n '1998-06-19',\r\n '1998-06-22',\r\n '1998-06-23',\r\n '1998-06-24',\r\n '1998-06-25',\r\n '1998-06-26',\r\n '1998-06-29',\r\n '1998-06-30',\r\n '1998-07-01',\r\n '1998-07-02',\r\n '1998-07-03',\r\n '1998-07-06',\r\n '1998-07-07',\r\n '1998-07-08',\r\n '1998-07-09',\r\n '1998-07-10',\r\n 
'1998-07-13',\r\n '1998-07-14',\r\n '1998-07-15',\r\n '1998-07-16',\r\n '1998-07-17',\r\n '1998-07-20',\r\n '1998-07-21',\r\n '1998-07-22',\r\n '1998-07-23',\r\n '1998-07-24',\r\n '1998-07-27',\r\n '1998-07-28',\r\n '1998-07-29',\r\n '1998-07-30',\r\n '1998-07-31',\r\n '1998-08-03',\r\n '1998-08-04',\r\n '1998-08-05',\r\n '1998-08-06',\r\n '1998-08-07',\r\n '1998-08-10',\r\n '1998-08-11',\r\n '1998-08-12',\r\n '1998-08-13',\r\n '1998-08-14',\r\n '1998-08-17',\r\n '1998-08-18',\r\n '1998-08-19',\r\n '1998-08-20',\r\n '1998-08-21',\r\n '1998-08-24',\r\n '1998-08-25',\r\n '1998-08-26',\r\n '1998-08-27',\r\n '1998-08-28',\r\n '1998-08-31',\r\n '1998-09-01',\r\n '1998-09-02',\r\n '1998-09-03',\r\n '1998-09-04',\r\n '1998-09-07',\r\n '1998-09-08',\r\n '1998-09-09',\r\n '1998-09-10',\r\n '1998-09-11',\r\n '1998-09-14',\r\n '1998-09-15',\r\n '1998-09-16',\r\n '1998-09-17',\r\n '1998-09-18',\r\n '1998-09-21',\r\n '1998-09-22',\r\n '1998-09-23',\r\n '1998-09-24',\r\n '1998-09-25',\r\n '1998-09-28',\r\n '1998-09-29',\r\n '1998-09-30',\r\n '1998-10-05',\r\n '1998-10-06',\r\n '1998-10-07',\r\n '1998-10-08',\r\n '1998-10-09',\r\n '1998-10-12',\r\n '1998-10-13',\r\n '1998-10-14',\r\n '1998-10-15',\r\n '1998-10-16',\r\n '1998-10-19',\r\n '1998-10-20',\r\n '1998-10-21',\r\n '1998-10-22',\r\n '1998-10-23',\r\n '1998-10-26',\r\n '1998-10-27',\r\n '1998-10-28',\r\n '1998-10-29',\r\n '1998-10-30',\r\n '1998-11-02',\r\n '1998-11-03',\r\n '1998-11-04',\r\n '1998-11-05',\r\n '1998-11-06',\r\n '1998-11-09',\r\n '1998-11-10',\r\n '1998-11-11',\r\n '1998-11-12',\r\n '1998-11-13',\r\n '1998-11-16',\r\n '1998-11-17',\r\n '1998-11-18',\r\n '1998-11-19',\r\n '1998-11-20',\r\n '1998-11-23',\r\n '1998-11-24',\r\n '1998-11-25',\r\n '1998-11-26',\r\n '1998-11-27',\r\n '1998-11-30',\r\n '1998-12-01',\r\n '1998-12-02',\r\n '1998-12-03',\r\n '1998-12-04',\r\n '1998-12-07',\r\n '1998-12-08',\r\n '1998-12-09',\r\n '1998-12-10',\r\n '1998-12-11',\r\n '1998-12-14',\r\n '1998-12-15',\r\n '1998-12-16',\r\n '1998-12-17',\r\n '1998-12-18',\r\n '1998-12-21',\r\n '1998-12-22',\r\n '1998-12-23',\r\n '1998-12-24',\r\n '1998-12-25',\r\n '1998-12-28',\r\n '1998-12-29',\r\n '1998-12-30',\r\n '1998-12-31',\r\n '1999-01-04',\r\n '1999-01-05',\r\n '1999-01-06',\r\n '1999-01-07',\r\n '1999-01-08',\r\n '1999-01-11',\r\n '1999-01-12',\r\n '1999-01-13',\r\n '1999-01-14',\r\n '1999-01-15',\r\n '1999-01-18',\r\n '1999-01-19',\r\n '1999-01-20',\r\n '1999-01-21',\r\n '1999-01-22',\r\n '1999-01-25',\r\n '1999-01-26',\r\n '1999-01-27',\r\n '1999-01-28',\r\n '1999-01-29',\r\n '1999-02-01',\r\n '1999-02-02',\r\n '1999-02-03',\r\n '1999-02-04',\r\n '1999-02-05',\r\n '1999-02-08',\r\n '1999-02-09',\r\n '1999-03-01',\r\n '1999-03-02',\r\n '1999-03-03',\r\n '1999-03-04',\r\n '1999-03-05',\r\n '1999-03-08',\r\n '1999-03-09',\r\n '1999-03-10',\r\n '1999-03-11',\r\n '1999-03-12',\r\n '1999-03-15',\r\n '1999-03-16',\r\n '1999-03-17',\r\n '1999-03-18',\r\n '1999-03-19',\r\n '1999-03-22',\r\n '1999-03-23',\r\n '1999-03-24',\r\n '1999-03-25',\r\n '1999-03-26',\r\n '1999-03-29',\r\n '1999-03-30',\r\n '1999-03-31',\r\n '1999-04-01',\r\n '1999-04-02',\r\n '1999-04-05',\r\n '1999-04-06',\r\n '1999-04-07',\r\n '1999-04-08',\r\n '1999-04-09',\r\n '1999-04-12',\r\n '1999-04-13',\r\n '1999-04-14',\r\n '1999-04-15',\r\n '1999-04-16',\r\n '1999-04-19',\r\n '1999-04-20',\r\n '1999-04-21',\r\n '1999-04-22',\r\n '1999-04-23',\r\n '1999-04-26',\r\n '1999-04-27',\r\n '1999-04-28',\r\n '1999-04-29',\r\n '1999-04-30',\r\n '1999-05-04',\r\n '1999-05-05',\r\n '1999-05-06',\r\n 
'1999-05-07',\r\n '1999-05-10',\r\n '1999-05-11',\r\n '1999-05-12',\r\n '1999-05-13',\r\n '1999-05-14',\r\n '1999-05-17',\r\n '1999-05-18',\r\n '1999-05-19',\r\n '1999-05-20',\r\n '1999-05-21',\r\n '1999-05-24',\r\n '1999-05-25',\r\n '1999-05-26',\r\n '1999-05-27',\r\n '1999-05-28',\r\n '1999-05-31',\r\n '1999-06-01',\r\n '1999-06-02',\r\n '1999-06-03',\r\n '1999-06-04',\r\n '1999-06-07',\r\n '1999-06-08',\r\n '1999-06-09',\r\n '1999-06-10',\r\n '1999-06-11',\r\n '1999-06-14',\r\n '1999-06-15',\r\n '1999-06-16',\r\n '1999-06-17',\r\n '1999-06-18',\r\n '1999-06-21',\r\n '1999-06-22',\r\n '1999-06-23',\r\n '1999-06-24',\r\n '1999-06-25',\r\n '1999-06-28',\r\n '1999-06-29',\r\n '1999-06-30',\r\n '1999-07-01',\r\n '1999-07-02',\r\n '1999-07-05',\r\n '1999-07-06',\r\n '1999-07-07',\r\n '1999-07-08',\r\n '1999-07-09',\r\n '1999-07-12',\r\n '1999-07-13',\r\n '1999-07-14',\r\n '1999-07-15',\r\n '1999-07-16',\r\n '1999-07-19',\r\n '1999-07-20',\r\n '1999-07-21',\r\n '1999-07-22',\r\n '1999-07-23',\r\n '1999-07-26',\r\n '1999-07-27',\r\n '1999-07-28',\r\n '1999-07-29',\r\n '1999-07-30',\r\n '1999-08-02',\r\n '1999-08-03',\r\n '1999-08-04',\r\n '1999-08-05',\r\n '1999-08-06',\r\n '1999-08-09',\r\n '1999-08-10',\r\n '1999-08-11',\r\n '1999-08-12',\r\n '1999-08-13',\r\n '1999-08-16',\r\n '1999-08-17',\r\n '1999-08-18',\r\n '1999-08-19',\r\n '1999-08-20',\r\n '1999-08-23',\r\n '1999-08-24',\r\n '1999-08-25',\r\n '1999-08-26',\r\n '1999-08-27',\r\n '1999-08-30',\r\n '1999-08-31',\r\n '1999-09-01',\r\n '1999-09-02',\r\n '1999-09-03',\r\n '1999-09-06',\r\n '1999-09-07',\r\n '1999-09-08',\r\n '1999-09-09',\r\n '1999-09-10',\r\n '1999-09-13',\r\n '1999-09-14',\r\n '1999-09-15',\r\n '1999-09-16',\r\n '1999-09-17',\r\n '1999-09-20',\r\n '1999-09-21',\r\n '1999-09-22',\r\n '1999-09-23',\r\n '1999-09-24',\r\n '1999-09-27',\r\n '1999-09-28',\r\n '1999-09-29',\r\n '1999-09-30',\r\n '1999-10-08',\r\n '1999-10-11',\r\n '1999-10-12',\r\n '1999-10-13',\r\n '1999-10-14',\r\n '1999-10-15',\r\n '1999-10-18',\r\n '1999-10-19',\r\n '1999-10-20',\r\n '1999-10-21',\r\n '1999-10-22',\r\n '1999-10-25',\r\n '1999-10-26',\r\n '1999-10-27',\r\n '1999-10-28',\r\n '1999-10-29',\r\n '1999-11-01',\r\n '1999-11-02',\r\n '1999-11-03',\r\n '1999-11-04',\r\n '1999-11-05',\r\n '1999-11-08',\r\n '1999-11-09',\r\n '1999-11-10',\r\n '1999-11-11',\r\n '1999-11-12',\r\n '1999-11-15',\r\n '1999-11-16',\r\n '1999-11-17',\r\n '1999-11-18',\r\n '1999-11-19',\r\n '1999-11-22',\r\n '1999-11-23',\r\n '1999-11-24',\r\n '1999-11-25',\r\n '1999-11-26',\r\n '1999-11-29',\r\n '1999-11-30',\r\n '1999-12-01',\r\n '1999-12-02',\r\n '1999-12-03',\r\n '1999-12-06',\r\n '1999-12-07',\r\n '1999-12-08',\r\n '1999-12-09',\r\n '1999-12-10',\r\n '1999-12-13',\r\n '1999-12-14',\r\n '1999-12-15',\r\n '1999-12-16',\r\n '1999-12-17',\r\n '1999-12-21',\r\n '1999-12-22',\r\n '1999-12-23',\r\n '1999-12-24',\r\n '1999-12-27',\r\n '1999-12-28',\r\n '1999-12-29',\r\n '1999-12-30',\r\n '2000-01-04',\r\n '2000-01-05',\r\n '2000-01-06',\r\n '2000-01-07',\r\n '2000-01-10',\r\n '2000-01-11',\r\n '2000-01-12',\r\n '2000-01-13',\r\n '2000-01-14',\r\n '2000-01-17',\r\n '2000-01-18',\r\n '2000-01-19',\r\n '2000-01-20',\r\n '2000-01-21',\r\n '2000-01-24',\r\n '2000-01-25',\r\n '2000-01-26',\r\n '2000-01-27',\r\n '2000-01-28',\r\n '2000-02-14',\r\n '2000-02-15',\r\n '2000-02-16',\r\n '2000-02-17',\r\n '2000-02-18',\r\n '2000-02-21',\r\n '2000-02-22',\r\n '2000-02-23',\r\n '2000-02-24',\r\n '2000-02-25',\r\n '2000-02-28',\r\n '2000-02-29',\r\n '2000-03-01',\r\n '2000-03-02',\r\n 
['... trading-calendar dates elided: consecutive exchange business days from 2000-03-03 through 2017-03-27, excluding weekends and holiday closures ...']
'2017-03-28',\r\n '2017-03-29',\r\n '2017-03-30',\r\n '2017-03-31',\r\n '2017-04-05',\r\n '2017-04-06',\r\n '2017-04-07',\r\n '2017-04-10',\r\n '2017-04-11',\r\n '2017-04-12',\r\n '2017-04-13',\r\n '2017-04-14',\r\n '2017-04-17',\r\n '2017-04-18',\r\n '2017-04-19',\r\n '2017-04-20',\r\n '2017-04-21',\r\n '2017-04-24',\r\n '2017-04-25',\r\n '2017-04-26',\r\n '2017-04-27',\r\n '2017-04-28',\r\n '2017-05-02',\r\n '2017-05-03',\r\n '2017-05-04',\r\n '2017-05-05',\r\n '2017-05-08',\r\n '2017-05-09',\r\n '2017-05-10',\r\n '2017-05-11',\r\n '2017-05-12',\r\n '2017-05-15',\r\n '2017-05-16',\r\n '2017-05-17',\r\n '2017-05-18',\r\n '2017-05-19',\r\n '2017-05-22',\r\n '2017-05-23',\r\n '2017-05-24',\r\n '2017-05-25',\r\n '2017-05-26',\r\n '2017-05-31',\r\n '2017-06-01',\r\n '2017-06-02',\r\n '2017-06-05',\r\n '2017-06-06',\r\n '2017-06-07',\r\n '2017-06-08',\r\n '2017-06-09',\r\n '2017-06-12',\r\n '2017-06-13',\r\n '2017-06-14',\r\n '2017-06-15',\r\n '2017-06-16',\r\n '2017-06-19',\r\n '2017-06-20',\r\n '2017-06-21',\r\n '2017-06-22',\r\n '2017-06-23',\r\n '2017-06-26',\r\n '2017-06-27',\r\n '2017-06-28',\r\n '2017-06-29',\r\n '2017-06-30',\r\n '2017-07-03',\r\n '2017-07-04',\r\n '2017-07-05',\r\n '2017-07-06',\r\n '2017-07-07',\r\n '2017-07-10',\r\n '2017-07-11',\r\n '2017-07-12',\r\n '2017-07-13',\r\n '2017-07-14',\r\n '2017-07-17',\r\n '2017-07-18',\r\n '2017-07-19',\r\n '2017-07-20',\r\n '2017-07-21',\r\n '2017-07-24',\r\n '2017-07-25',\r\n '2017-07-26',\r\n '2017-07-27',\r\n '2017-07-28',\r\n '2017-07-31',\r\n '2017-08-01',\r\n '2017-08-02',\r\n '2017-08-03',\r\n '2017-08-04',\r\n '2017-08-07',\r\n '2017-08-08',\r\n '2017-08-09',\r\n '2017-08-10',\r\n '2017-08-11',\r\n '2017-08-14',\r\n '2017-08-15',\r\n '2017-08-16',\r\n '2017-08-17',\r\n '2017-08-18',\r\n '2017-08-21',\r\n '2017-08-22',\r\n '2017-08-23',\r\n '2017-08-24',\r\n '2017-08-25',\r\n '2017-08-28',\r\n '2017-08-29',\r\n '2017-08-30',\r\n '2017-08-31',\r\n '2017-09-01',\r\n '2017-09-04',\r\n '2017-09-05',\r\n '2017-09-06',\r\n '2017-09-07',\r\n '2017-09-08',\r\n '2017-09-11',\r\n '2017-09-12',\r\n '2017-09-13',\r\n '2017-09-14',\r\n '2017-09-15',\r\n '2017-09-18',\r\n '2017-09-19',\r\n '2017-09-20',\r\n '2017-09-21',\r\n '2017-09-22',\r\n '2017-09-25',\r\n '2017-09-26',\r\n '2017-09-27',\r\n '2017-09-28',\r\n '2017-09-29',\r\n '2017-10-09',\r\n '2017-10-10',\r\n '2017-10-11',\r\n '2017-10-12',\r\n '2017-10-13',\r\n '2017-10-16',\r\n '2017-10-17',\r\n '2017-10-18',\r\n '2017-10-19',\r\n '2017-10-20',\r\n '2017-10-23',\r\n '2017-10-24',\r\n '2017-10-25',\r\n '2017-10-26',\r\n '2017-10-27',\r\n '2017-10-30',\r\n '2017-10-31',\r\n '2017-11-01',\r\n '2017-11-02',\r\n '2017-11-03',\r\n '2017-11-06',\r\n '2017-11-07',\r\n '2017-11-08',\r\n '2017-11-09',\r\n '2017-11-10',\r\n '2017-11-13',\r\n '2017-11-14',\r\n '2017-11-15',\r\n '2017-11-16',\r\n '2017-11-17',\r\n '2017-11-20',\r\n '2017-11-21',\r\n '2017-11-22',\r\n '2017-11-23',\r\n '2017-11-24',\r\n '2017-11-27',\r\n '2017-11-28',\r\n '2017-11-29',\r\n '2017-11-30',\r\n '2017-12-01',\r\n '2017-12-04',\r\n '2017-12-05',\r\n '2017-12-06',\r\n '2017-12-07',\r\n '2017-12-08',\r\n '2017-12-11',\r\n '2017-12-12',\r\n '2017-12-13',\r\n '2017-12-14',\r\n '2017-12-15',\r\n '2017-12-18',\r\n '2017-12-19',\r\n '2017-12-20',\r\n '2017-12-21',\r\n '2017-12-22',\r\n '2017-12-25',\r\n '2017-12-26',\r\n '2017-12-27',\r\n '2017-12-28',\r\n '2017-12-29',\r\n '2018-01-02',\r\n '2018-01-03',\r\n '2018-01-04',\r\n '2018-01-05',\r\n '2018-01-08',\r\n '2018-01-09',\r\n '2018-01-10',\r\n '2018-01-11',\r\n 
'2018-01-12',\r\n '2018-01-15',\r\n '2018-01-16',\r\n '2018-01-17',\r\n '2018-01-18',\r\n '2018-01-19',\r\n '2018-01-22',\r\n '2018-01-23',\r\n '2018-01-24',\r\n '2018-01-25',\r\n '2018-01-26',\r\n '2018-01-29',\r\n '2018-01-30',\r\n '2018-01-31',\r\n '2018-02-01',\r\n '2018-02-02',\r\n '2018-02-05',\r\n '2018-02-06',\r\n '2018-02-07',\r\n '2018-02-08',\r\n '2018-02-09',\r\n '2018-02-12',\r\n '2018-02-13',\r\n '2018-02-14',\r\n '2018-02-22',\r\n '2018-02-23',\r\n '2018-02-26',\r\n '2018-02-27',\r\n '2018-02-28',\r\n '2018-03-01',\r\n '2018-03-02',\r\n '2018-03-05',\r\n '2018-03-06',\r\n '2018-03-07',\r\n '2018-03-08',\r\n '2018-03-09',\r\n '2018-03-12',\r\n '2018-03-13',\r\n '2018-03-14',\r\n '2018-03-15',\r\n '2018-03-16',\r\n '2018-03-19',\r\n '2018-03-20',\r\n '2018-03-21',\r\n '2018-03-22',\r\n '2018-03-23',\r\n '2018-03-26',\r\n '2018-03-27',\r\n '2018-03-28',\r\n '2018-03-29',\r\n '2018-03-30',\r\n '2018-04-02',\r\n '2018-04-03',\r\n '2018-04-04',\r\n '2018-04-09',\r\n '2018-04-10',\r\n '2018-04-11',\r\n '2018-04-12',\r\n '2018-04-13',\r\n '2018-04-16',\r\n '2018-04-17',\r\n '2018-04-18',\r\n '2018-04-19',\r\n '2018-04-20',\r\n '2018-04-23',\r\n '2018-04-24',\r\n '2018-04-25',\r\n '2018-04-26',\r\n '2018-04-27',\r\n '2018-05-02',\r\n '2018-05-03',\r\n '2018-05-04',\r\n '2018-05-07',\r\n '2018-05-08',\r\n '2018-05-09',\r\n '2018-05-10',\r\n '2018-05-11',\r\n '2018-05-14',\r\n '2018-05-15',\r\n '2018-05-16',\r\n '2018-05-17',\r\n '2018-05-18',\r\n '2018-05-21',\r\n '2018-05-22',\r\n '2018-05-23',\r\n '2018-05-24',\r\n '2018-05-25',\r\n '2018-05-28',\r\n '2018-05-29',\r\n '2018-05-30',\r\n '2018-05-31',\r\n '2018-06-01',\r\n '2018-06-04',\r\n '2018-06-05',\r\n '2018-06-06',\r\n '2018-06-07',\r\n '2018-06-08',\r\n '2018-06-11',\r\n '2018-06-12',\r\n '2018-06-13',\r\n '2018-06-14',\r\n '2018-06-15',\r\n '2018-06-19',\r\n '2018-06-20',\r\n '2018-06-21',\r\n '2018-06-22',\r\n '2018-06-25',\r\n '2018-06-26',\r\n '2018-06-27',\r\n '2018-06-28',\r\n '2018-06-29',\r\n '2018-07-02',\r\n '2018-07-03',\r\n '2018-07-04',\r\n '2018-07-05',\r\n '2018-07-06',\r\n '2018-07-09',\r\n '2018-07-10',\r\n '2018-07-11',\r\n '2018-07-12',\r\n '2018-07-13',\r\n '2018-07-16',\r\n '2018-07-17',\r\n '2018-07-18',\r\n '2018-07-19',\r\n '2018-07-20',\r\n '2018-07-23',\r\n '2018-07-24',\r\n '2018-07-25',\r\n '2018-07-26',\r\n '2018-07-27',\r\n '2018-07-30',\r\n '2018-07-31',\r\n '2018-08-01',\r\n '2018-08-02',\r\n '2018-08-03',\r\n '2018-08-06',\r\n '2018-08-07',\r\n '2018-08-08',\r\n '2018-08-09',\r\n '2018-08-10',\r\n '2018-08-13',\r\n '2018-08-14',\r\n '2018-08-15',\r\n '2018-08-16',\r\n '2018-08-17',\r\n '2018-08-20',\r\n '2018-08-21',\r\n '2018-08-22',\r\n '2018-08-23',\r\n '2018-08-24',\r\n '2018-08-27',\r\n '2018-08-28',\r\n '2018-08-29',\r\n '2018-08-30',\r\n '2018-08-31',\r\n '2018-09-03',\r\n '2018-09-04',\r\n '2018-09-05',\r\n '2018-09-06',\r\n '2018-09-07',\r\n '2018-09-10',\r\n '2018-09-11',\r\n '2018-09-12',\r\n '2018-09-13',\r\n '2018-09-14',\r\n '2018-09-17',\r\n '2018-09-18',\r\n '2018-09-19',\r\n '2018-09-20',\r\n '2018-09-21',\r\n '2018-09-25',\r\n '2018-09-26',\r\n '2018-09-27',\r\n '2018-09-28',\r\n '2018-10-08',\r\n '2018-10-09',\r\n '2018-10-10',\r\n '2018-10-11',\r\n '2018-10-12',\r\n '2018-10-15',\r\n '2018-10-16',\r\n '2018-10-17',\r\n '2018-10-18',\r\n '2018-10-19',\r\n '2018-10-22',\r\n '2018-10-23',\r\n '2018-10-24',\r\n '2018-10-25',\r\n '2018-10-26',\r\n '2018-10-29',\r\n '2018-10-30',\r\n '2018-10-31',\r\n '2018-11-01',\r\n '2018-11-02',\r\n '2018-11-05',\r\n '2018-11-06',\r\n 
'2018-11-07',\r\n '2018-11-08',\r\n '2018-11-09',\r\n '2018-11-12',\r\n '2018-11-13',\r\n '2018-11-14',\r\n '2018-11-15',\r\n '2018-11-16',\r\n '2018-11-19',\r\n '2018-11-20',\r\n '2018-11-21',\r\n '2018-11-22',\r\n '2018-11-23',\r\n '2018-11-26',\r\n '2018-11-27',\r\n '2018-11-28',\r\n '2018-11-29',\r\n '2018-11-30',\r\n '2018-12-03',\r\n '2018-12-04',\r\n '2018-12-05',\r\n '2018-12-06',\r\n '2018-12-07',\r\n '2018-12-10',\r\n '2018-12-11',\r\n '2018-12-12',\r\n '2018-12-13',\r\n '2018-12-14',\r\n '2018-12-17',\r\n '2018-12-18',\r\n '2018-12-19',\r\n '2018-12-20',\r\n '2018-12-21',\r\n '2018-12-24',\r\n '2018-12-25',\r\n '2018-12-26',\r\n '2018-12-27',\r\n '2018-12-28',\r\n '2019-01-02',\r\n '2019-01-03',\r\n '2019-01-04',\r\n '2019-01-07',\r\n '2019-01-08',\r\n '2019-01-09',\r\n '2019-01-10',\r\n '2019-01-11',\r\n '2019-01-14',\r\n '2019-01-15',\r\n '2019-01-16',\r\n '2019-01-17',\r\n '2019-01-18',\r\n '2019-01-21',\r\n '2019-01-22',\r\n '2019-01-23',\r\n '2019-01-24',\r\n '2019-01-25',\r\n '2019-01-28',\r\n '2019-01-29',\r\n '2019-01-30',\r\n '2019-01-31',\r\n '2019-02-01',\r\n '2019-02-11',\r\n '2019-02-12',\r\n '2019-02-13',\r\n '2019-02-14',\r\n '2019-02-15',\r\n '2019-02-18',\r\n '2019-02-19',\r\n '2019-02-20',\r\n '2019-02-21',\r\n '2019-02-22',\r\n '2019-02-25',\r\n '2019-02-26',\r\n '2019-02-27',\r\n '2019-02-28',\r\n '2019-03-01',\r\n '2019-03-04',\r\n '2019-03-05',\r\n '2019-03-06',\r\n '2019-03-07',\r\n '2019-03-08',\r\n '2019-03-11',\r\n '2019-03-12',\r\n '2019-03-13',\r\n '2019-03-14',\r\n '2019-03-15',\r\n '2019-03-18',\r\n '2019-03-19',\r\n '2019-03-20',\r\n '2019-03-21',\r\n '2019-03-22',\r\n '2019-03-25',\r\n '2019-03-26',\r\n '2019-03-27',\r\n '2019-03-28',\r\n '2019-03-29',\r\n '2019-04-01',\r\n '2019-04-02',\r\n '2019-04-03',\r\n '2019-04-04',\r\n '2019-04-08',\r\n '2019-04-09',\r\n '2019-04-10',\r\n '2019-04-11',\r\n '2019-04-12',\r\n '2019-04-15',\r\n '2019-04-16',\r\n '2019-04-17',\r\n '2019-04-18',\r\n '2019-04-19',\r\n '2019-04-22',\r\n '2019-04-23',\r\n '2019-04-24',\r\n '2019-04-25',\r\n '2019-04-26',\r\n '2019-04-29',\r\n '2019-04-30',\r\n '2019-05-06',\r\n '2019-05-07',\r\n '2019-05-08',\r\n '2019-05-09',\r\n '2019-05-10',\r\n '2019-05-13',\r\n '2019-05-14',\r\n '2019-05-15',\r\n '2019-05-16',\r\n '2019-05-17',\r\n '2019-05-20',\r\n '2019-05-21',\r\n '2019-05-22',\r\n '2019-05-23',\r\n '2019-05-24',\r\n '2019-05-27',\r\n '2019-05-28',\r\n '2019-05-29',\r\n '2019-05-30',\r\n '2019-05-31',\r\n '2019-06-03',\r\n '2019-06-04',\r\n '2019-06-05',\r\n '2019-06-06',\r\n '2019-06-10',\r\n '2019-06-11',\r\n '2019-06-12',\r\n '2019-06-13',\r\n '2019-06-14',\r\n '2019-06-17',\r\n '2019-06-18',\r\n '2019-06-19',\r\n '2019-06-20',\r\n '2019-06-21',\r\n '2019-06-24',\r\n '2019-06-25',\r\n '2019-06-26',\r\n '2019-06-27',\r\n '2019-06-28',\r\n '2019-07-01',\r\n '2019-07-02',\r\n '2019-07-03',\r\n '2019-07-04',\r\n '2019-07-05',\r\n '2019-07-08',\r\n '2019-07-09',\r\n '2019-07-10',\r\n '2019-07-11',\r\n '2019-07-12',\r\n '2019-07-15',\r\n '2019-07-16',\r\n '2019-07-17',\r\n '2019-07-18',\r\n '2019-07-19',\r\n '2019-07-22',\r\n '2019-07-23',\r\n '2019-07-24',\r\n '2019-07-25',\r\n '2019-07-26',\r\n '2019-07-29',\r\n '2019-07-30',\r\n '2019-07-31',\r\n '2019-08-01',\r\n '2019-08-02',\r\n '2019-08-05',\r\n '2019-08-06',\r\n '2019-08-07',\r\n '2019-08-08',\r\n '2019-08-09',\r\n '2019-08-12',\r\n '2019-08-13',\r\n '2019-08-14',\r\n '2019-08-15',\r\n '2019-08-16',\r\n '2019-08-19',\r\n '2019-08-20',\r\n '2019-08-21',\r\n '2019-08-22',\r\n '2019-08-23',\r\n '2019-08-26',\r\n 
'2019-08-27',\r\n '2019-08-28',\r\n '2019-08-29',\r\n '2019-08-30',\r\n '2019-09-02',\r\n '2019-09-03',\r\n '2019-09-04',\r\n '2019-09-05',\r\n '2019-09-06',\r\n '2019-09-09',\r\n '2019-09-10',\r\n '2019-09-11',\r\n '2019-09-12',\r\n '2019-09-16',\r\n '2019-09-17',\r\n '2019-09-18',\r\n '2019-09-19',\r\n '2019-09-20',\r\n '2019-09-23',\r\n '2019-09-24',\r\n '2019-09-25',\r\n '2019-09-26',\r\n '2019-09-27',\r\n '2019-09-30',\r\n '2019-10-08',\r\n '2019-10-09',\r\n '2019-10-10',\r\n '2019-10-11',\r\n '2019-10-14',\r\n '2019-10-15',\r\n '2019-10-16',\r\n '2019-10-17',\r\n '2019-10-18',\r\n '2019-10-21',\r\n '2019-10-22',\r\n '2019-10-23',\r\n '2019-10-24',\r\n '2019-10-25',\r\n '2019-10-28',\r\n '2019-10-29',\r\n '2019-10-30',\r\n '2019-10-31',\r\n '2019-11-01',\r\n '2019-11-04',\r\n '2019-11-05',\r\n '2019-11-06',\r\n '2019-11-07',\r\n '2019-11-08',\r\n '2019-11-11',\r\n '2019-11-12',\r\n '2019-11-13',\r\n '2019-11-14',\r\n '2019-11-15',\r\n '2019-11-18',\r\n '2019-11-19',\r\n '2019-11-20',\r\n '2019-11-21',\r\n '2019-11-22',\r\n '2019-11-25',\r\n '2019-11-26',\r\n '2019-11-27',\r\n '2019-11-28',\r\n '2019-11-29',\r\n '2019-12-02',\r\n '2019-12-03',\r\n '2019-12-04',\r\n '2019-12-05',\r\n '2019-12-06',\r\n '2019-12-09',\r\n '2019-12-10',\r\n '2019-12-11',\r\n '2019-12-12',\r\n '2019-12-13',\r\n '2019-12-16',\r\n '2019-12-17',\r\n '2019-12-18',\r\n '2019-12-19',\r\n '2019-12-20',\r\n '2019-12-23',\r\n '2019-12-24',\r\n '2019-12-25',\r\n '2019-12-26',\r\n '2019-12-27',\r\n '2019-12-30',\r\n '2019-12-31',\r\n '2020-01-02',\r\n '2020-01-03',\r\n '2020-01-06',\r\n '2020-01-07',\r\n '2020-01-08',\r\n '2020-01-09',\r\n '2020-01-10',\r\n '2020-01-13',\r\n '2020-01-14',\r\n '2020-01-15',\r\n '2020-01-16',\r\n '2020-01-17',\r\n '2020-01-20',\r\n '2020-01-21',\r\n '2020-01-22',\r\n '2020-01-23',\r\n '2020-01-31',\r\n '2020-02-03',\r\n '2020-02-04',\r\n '2020-02-05',\r\n '2020-02-06',\r\n '2020-02-07',\r\n '2020-02-10',\r\n '2020-02-11',\r\n '2020-02-12',\r\n '2020-02-13',\r\n '2020-02-14',\r\n '2020-02-17',\r\n '2020-02-18',\r\n '2020-02-19',\r\n '2020-02-20',\r\n '2020-02-21',\r\n '2020-02-24',\r\n '2020-02-25',\r\n '2020-02-26',\r\n '2020-02-27',\r\n '2020-02-28',\r\n '2020-03-02',\r\n '2020-03-03',\r\n '2020-03-04',\r\n '2020-03-05',\r\n '2020-03-06',\r\n '2020-03-09',\r\n '2020-03-10',\r\n '2020-03-11',\r\n '2020-03-12',\r\n '2020-03-13',\r\n '2020-03-16',\r\n '2020-03-17',\r\n '2020-03-18',\r\n '2020-03-19',\r\n '2020-03-20',\r\n '2020-03-23',\r\n '2020-03-24',\r\n '2020-03-25',\r\n '2020-03-26',\r\n '2020-03-27',\r\n '2020-03-30',\r\n '2020-03-31',\r\n '2020-04-01',\r\n '2020-04-02',\r\n '2020-04-03',\r\n '2020-04-07',\r\n '2020-04-08',\r\n '2020-04-09',\r\n '2020-04-10',\r\n '2020-04-13',\r\n '2020-04-14',\r\n '2020-04-15',\r\n '2020-04-16',\r\n '2020-04-17',\r\n '2020-04-20',\r\n '2020-04-21',\r\n '2020-04-22',\r\n '2020-04-23',\r\n '2020-04-24',\r\n '2020-04-27',\r\n '2020-04-28',\r\n '2020-04-29',\r\n '2020-04-30',\r\n '2020-05-06',\r\n '2020-05-07',\r\n '2020-05-08',\r\n '2020-05-11',\r\n '2020-05-12',\r\n '2020-05-13',\r\n '2020-05-14',\r\n '2020-05-15',\r\n '2020-05-18',\r\n '2020-05-19',\r\n '2020-05-20',\r\n '2020-05-21',\r\n '2020-05-22',\r\n '2020-05-25',\r\n '2020-05-26',\r\n '2020-05-27',\r\n '2020-05-28',\r\n '2020-05-29',\r\n '2020-06-01',\r\n '2020-06-02',\r\n '2020-06-03',\r\n '2020-06-04',\r\n '2020-06-05',\r\n '2020-06-08',\r\n '2020-06-09',\r\n '2020-06-10',\r\n '2020-06-11',\r\n '2020-06-12',\r\n '2020-06-15',\r\n '2020-06-16',\r\n '2020-06-17',\r\n '2020-06-18',\r\n 
'2020-06-19',\r\n '2020-06-22',\r\n '2020-06-23',\r\n '2020-06-24',\r\n '2020-06-29',\r\n '2020-06-30',\r\n '2020-07-01',\r\n '2020-07-02',\r\n '2020-07-03',\r\n '2020-07-06',\r\n '2020-07-07',\r\n '2020-07-08',\r\n '2020-07-09',\r\n '2020-07-10',\r\n '2020-07-13',\r\n '2020-07-14',\r\n '2020-07-15',\r\n '2020-07-16',\r\n '2020-07-17',\r\n '2020-07-20',\r\n '2020-07-21',\r\n '2020-07-22',\r\n '2020-07-23',\r\n '2020-07-24',\r\n '2020-07-27',\r\n '2020-07-28',\r\n '2020-07-29',\r\n '2020-07-30',\r\n '2020-07-31',\r\n '2020-08-03',\r\n '2020-08-04',\r\n '2020-08-05',\r\n '2020-08-06',\r\n '2020-08-07',\r\n '2020-08-10',\r\n '2020-08-11',\r\n '2020-08-12',\r\n '2020-08-13',\r\n '2020-08-14',\r\n '2020-08-17',\r\n '2020-08-18',\r\n '2020-08-19',\r\n '2020-08-20',\r\n '2020-08-21',\r\n '2020-08-24',\r\n '2020-08-25',\r\n '2020-08-26',\r\n '2020-08-27',\r\n '2020-08-28',\r\n '2020-08-31',\r\n '2020-09-01',\r\n '2020-09-02',\r\n '2020-09-03',\r\n '2020-09-04',\r\n '2020-09-07',\r\n '2020-09-08',\r\n '2020-09-09',\r\n '2020-09-10',\r\n '2020-09-11',\r\n '2020-09-14',\r\n '2020-09-15',\r\n '2020-09-16',\r\n '2020-09-17',\r\n '2020-09-18',\r\n '2020-09-21',\r\n '2020-09-22',\r\n '2020-09-23',\r\n '2020-09-24',\r\n '2020-09-25',\r\n '2020-09-28',\r\n '2020-09-29',\r\n '2020-09-30',\r\n '2020-10-09',\r\n '2020-10-12',\r\n '2020-10-13',\r\n '2020-10-14',\r\n '2020-10-15',\r\n '2020-10-16',\r\n '2020-10-19',\r\n '2020-10-20',\r\n '2020-10-21',\r\n '2020-10-22',\r\n '2020-10-23',\r\n '2020-10-26',\r\n '2020-10-27',\r\n '2020-10-28',\r\n '2020-10-29',\r\n '2020-10-30',\r\n '2020-11-02',\r\n '2020-11-03',\r\n '2020-11-04',\r\n '2020-11-05',\r\n '2020-11-06',\r\n '2020-11-09',\r\n '2020-11-10',\r\n '2020-11-11',\r\n '2020-11-12',\r\n '2020-11-13',\r\n '2020-11-16',\r\n '2020-11-17',\r\n '2020-11-18',\r\n '2020-11-19',\r\n '2020-11-20',\r\n '2020-11-23',\r\n '2020-11-24',\r\n '2020-11-25',\r\n '2020-11-26',\r\n '2020-11-27',\r\n '2020-11-30',\r\n '2020-12-01',\r\n '2020-12-02',\r\n '2020-12-03',\r\n '2020-12-04',\r\n '2020-12-07',\r\n '2020-12-08',\r\n '2020-12-09',\r\n '2020-12-10',\r\n '2020-12-11',\r\n '2020-12-14',\r\n '2020-12-15',\r\n '2020-12-16',\r\n '2020-12-17',\r\n '2020-12-18',\r\n '2020-12-21',\r\n '2020-12-22',\r\n '2020-12-23',\r\n '2020-12-24',\r\n '2020-12-25',\r\n '2020-12-28',\r\n '2020-12-29',\r\n '2020-12-30',\r\n '2020-12-31'\r\n]\r\n\r\ndef QA_util_format_date2str(cursor_date):\r\n \"\"\"\r\n 对输入日期进行格式化处理,返回格式为 \"%Y-%m-%d\" 格式字符串\r\n 支持格式包括:\r\n 1. str: \"%Y%m%d\" \"%Y%m%d%H%M%S\", \"%Y%m%d %H:%M:%S\",\r\n \"%Y-%m-%d\", \"%Y-%m-%d %H:%M:%S\", \"%Y-%m-%d %H%M%S\"\r\n 2. datetime.datetime\r\n 3. pd.Timestamp\r\n 4. 
int -> 自动在右边加 0 然后转换,譬如 '20190302093' --> \"2019-03-02\"\r\n\r\n :param cursor_date: str/datetime.datetime/int 日期或时间\r\n :return: str 返回字符串格式日期\r\n \"\"\"\r\n if isinstance(cursor_date, datetime.datetime):\r\n cursor_date = str(cursor_date)[:10]\r\n elif isinstance(cursor_date, str):\r\n try:\r\n cursor_date = str(pd.Timestamp(cursor_date))[:10]\r\n except:\r\n raise ValueError('请输入正确的日期格式, 建议 \"%Y-%m-%d\"')\r\n elif isinstance(cursor_date, int):\r\n cursor_date = str(pd.Timestamp(\"{:<014d}\".format(cursor_date)))[:10]\r\n else:\r\n raise ValueError('请输入正确的日期格式,建议 \"%Y-%m-%d\"')\r\n return cursor_date\r\n\r\n\r\ndef QA_util_get_next_trade_date(cursor_date, n=1):\r\n \"\"\"\r\n 得到下 n 个交易日 (不包含当前交易日)\r\n :param date:\r\n :param n:\r\n \"\"\"\r\n\r\n cursor_date = QA_util_format_date2str(cursor_date)\r\n if cursor_date in trade_date_sse:\r\n # 如果指定日期为交易日\r\n return QA_util_date_gap(cursor_date, n, \"gt\")\r\n real_pre_trade_date = QA_util_get_real_date(cursor_date)\r\n return QA_util_date_gap(real_pre_trade_date, n, \"gt\")\r\n\r\n\r\ndef QA_util_get_pre_trade_date(cursor_date, n=1):\r\n \"\"\"\r\n 得到前 n 个交易日 (不包含当前交易日)\r\n :param date:\r\n :param n:\r\n \"\"\"\r\n\r\n cursor_date = QA_util_format_date2str(cursor_date)\r\n if cursor_date in trade_date_sse:\r\n return QA_util_date_gap(cursor_date, n, \"lt\")\r\n real_aft_trade_date = QA_util_get_real_date(cursor_date)\r\n return QA_util_date_gap(real_aft_trade_date, n, \"lt\")\r\n\r\n\r\n\r\ndef QA_util_if_trade(day):\r\n '''\r\n '日期是否交易'\r\n 查询上面的 交易日 列表\r\n :param day: 类型 str eg: 2018-11-11\r\n :return: Boolean 类型\r\n '''\r\n if day in trade_date_sse:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef QA_util_if_tradetime(\r\n _time=datetime.datetime.now(),\r\n market=MARKET_TYPE.STOCK_CN,\r\n code=None\r\n):\r\n '时间是否交易'\r\n _time = datetime.datetime.strptime(str(_time)[0:19], '%Y-%m-%d %H:%M:%S')\r\n if market is MARKET_TYPE.STOCK_CN:\r\n if QA_util_if_trade(str(_time.date())[0:10]):\r\n if _time.hour in [10, 13, 14]:\r\n return True\r\n elif _time.hour in [\r\n 9\r\n ] and _time.minute >= 15: # 修改成9:15 加入 9:15-9:30的盘前竞价时间\r\n return True\r\n elif _time.hour in [11] and _time.minute <= 30:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n elif market is MARKET_TYPE.FUTURE_CN: \r\n date_today=str(_time.date()) \r\n date_yesterday=str((_time-datetime.timedelta(days=1)).date()) \r\n \r\n is_today_open=QA_util_if_trade(date_today)\r\n is_yesterday_open=QA_util_if_trade(date_yesterday)\r\n \r\n #考虑周六日的期货夜盘情况\r\n if is_today_open==False: #可能是周六或者周日\r\n if is_yesterday_open==False or (_time.hour > 2 or _time.hour == 2 and _time.minute > 30):\r\n return False\r\n\r\n shortName = \"\" # i , p\r\n for i in range(len(code)):\r\n ch = code[i]\r\n if ch.isdigit(): # ch >= 48 and ch <= 57:\r\n break\r\n shortName += code[i].upper()\r\n\r\n period = [\r\n [9, 0, 10, 15],\r\n [10, 30, 11, 30],\r\n [13, 30, 15, 0]\r\n ]\r\n \r\n if (shortName in [\"IH\", 'IF', 'IC']):\r\n period = [\r\n [9, 30, 11, 30],\r\n [13, 0, 15, 0]\r\n ]\r\n elif (shortName in [\"T\", \"TF\"]):\r\n period = [\r\n [9, 15, 11, 30],\r\n [13, 0, 15, 15]\r\n ]\r\n \r\n if 0<=_time.weekday()<=4:\r\n for i in range(len(period)):\r\n p = period[i]\r\n if ((_time.hour > p[0] or (_time.hour == p[0] and _time.minute >= p[1])) and (_time.hour < p[2] or (_time.hour == p[2] and _time.minute < p[3]))):\r\n return True\r\n\r\n #最新夜盘时间表_2019.03.29\r\n nperiod = [\r\n [\r\n ['AU', 'AG', 'SC'],\r\n [21, 0, 2, 30] \r\n ],\r\n [\r\n ['CU', 'AL', 'ZN', 'PB', 'SN', 
'NI'],\r\n [21, 0, 1, 0] \r\n ],\r\n [\r\n ['RU', 'RB', 'HC', 'BU','FU','SP'],\r\n [21, 0, 23, 0]\r\n ],\r\n [\r\n ['A', 'B', 'Y', 'M', 'JM', 'J', 'P', 'I', 'L', 'V', 'PP', 'EG', 'C', 'CS'],\r\n [21, 0, 23, 0]\r\n ],\r\n [\r\n ['SR', 'CF', 'RM', 'MA', 'TA', 'ZC', 'FG', 'IO', 'CY'],\r\n [21, 0, 23, 30]\r\n ],\r\n ]\r\n\r\n for i in range(len(nperiod)):\r\n for j in range(len(nperiod[i][0])):\r\n if nperiod[i][0][j] == shortName:\r\n p = nperiod[i][1]\r\n condA = _time.hour > p[0] or (_time.hour == p[0] and _time.minute >= p[1])\r\n condB = _time.hour < p[2] or (_time.hour == p[2] and _time.minute < p[3])\r\n # in one day\r\n if p[2] >= p[0]:\r\n if ((_time.weekday() >= 0 and _time.weekday() <= 4) and condA and condB):\r\n return True\r\n else:\r\n if (((_time.weekday() >= 0 and _time.weekday() <= 4) and condA) or ((_time.weekday() >= 1 and _time.weekday() <= 5) and condB)):\r\n return True\r\n return False\r\n return False\r\n\r\n\r\ndef QA_util_get_next_day(date, n=1):\r\n '''\r\n 得到下一个(n)交易日\r\n :param date: 类型 str eg: 2018-11-11\r\n :param n: 整形\r\n :return: 字符串 str eg: 2018-11-12\r\n '''\r\n date = str(date)[0:10]\r\n return QA_util_date_gap(date, n, 'gt')\r\n\r\n\r\ndef QA_util_get_last_day(date, n=1):\r\n '''\r\n 得到上一个(n)交易日\r\n :param date: 类型 str eg: 2018-11-11\r\n :param n: 整形\r\n :return: 字符串 str eg: 2018-11-10\r\n '''\r\n date = str(date)[0:10]\r\n return QA_util_date_gap(date, n, 'lt')\r\n\r\n\r\ndef QA_util_get_last_datetime(datetime, day=1):\r\n date = str(datetime)[0:10]\r\n return '{} {}'.format(QA_util_date_gap(date, day, 'lt'), str(datetime)[11:])\r\n\r\n\r\ndef QA_util_get_next_datetime(datetime, day=1):\r\n date = str(datetime)[0:10]\r\n return '{} {}'.format(QA_util_date_gap(date, day, 'gt'), str(datetime)[11:])\r\n\r\n\r\ndef QA_util_get_real_date(date, trade_list=trade_date_sse, towards=-1):\r\n \"\"\"\r\n 获取真实的交易日期,其中,第三个参数towards是表示向前/向后推\r\n towards=1 日期向后迭代\r\n towards=-1 日期向前迭代\r\n @ yutiansut\r\n\r\n \"\"\"\r\n date = str(date)[0:10]\r\n if towards == 1:\r\n while date not in trade_list:\r\n date = str(\r\n datetime.datetime.strptime(str(date)[0:10],\r\n '%Y-%m-%d') +\r\n datetime.timedelta(days=1)\r\n )[0:10]\r\n else:\r\n return str(date)[0:10]\r\n elif towards == -1:\r\n while date not in trade_list:\r\n date = str(\r\n datetime.datetime.strptime(str(date)[0:10],\r\n '%Y-%m-%d') -\r\n datetime.timedelta(days=1)\r\n )[0:10]\r\n else:\r\n return str(date)[0:10]\r\n\r\n\r\ndef QA_util_get_real_datelist(start, end):\r\n \"\"\"\r\n 取数据的真实区间,返回的时候用 start,end=QA_util_get_real_datelist\r\n @yutiansut\r\n 2017/8/10\r\n\r\n 当start end中间没有交易日 返回None, None\r\n @yutiansut/ 2017-12-19\r\n \"\"\"\r\n real_start = QA_util_get_real_date(start, trade_date_sse, 1)\r\n real_end = QA_util_get_real_date(end, trade_date_sse, -1)\r\n if trade_date_sse.index(real_start) > trade_date_sse.index(real_end):\r\n return None, None\r\n else:\r\n return (real_start, real_end)\r\n\r\n\r\ndef QA_util_get_trade_range(start, end):\r\n '给出交易具体时间'\r\n start, end = QA_util_get_real_datelist(start, end)\r\n if start is not None:\r\n return trade_date_sse[trade_date_sse\r\n .index(start):trade_date_sse.index(end) + 1:1]\r\n else:\r\n return None\r\n\r\n\r\ndef QA_util_get_trade_gap(start, end):\r\n '返回start_day到end_day中间有多少个交易天 算首尾'\r\n start, end = QA_util_get_real_datelist(start, end)\r\n if start is not None:\r\n return trade_date_sse.index(end) + 1 - trade_date_sse.index(start)\r\n else:\r\n return 0\r\n\r\n\r\ndef QA_util_date_gap(date, gap, methods):\r\n '''\r\n :param date: 字符串起始日 类型 str eg: 
2018-11-11\r\n :param gap: 整数 间隔多数个交易日\r\n :param methods: gt大于 ,gte 大于等于, 小于lt ,小于等于lte , 等于===\r\n :return: 字符串 eg:2000-01-01\r\n '''\r\n try:\r\n if methods in ['>', 'gt']:\r\n return trade_date_sse[trade_date_sse.index(date) + gap]\r\n elif methods in ['>=', 'gte']:\r\n return trade_date_sse[trade_date_sse.index(date) + gap - 1]\r\n elif methods in ['<', 'lt']:\r\n return trade_date_sse[trade_date_sse.index(date) - gap]\r\n elif methods in ['<=', 'lte']:\r\n return trade_date_sse[trade_date_sse.index(date) - gap + 1]\r\n elif methods in ['==', '=', 'eq']:\r\n return date\r\n\r\n except:\r\n return 'wrong date'\r\n\r\n\r\ndef QA_util_get_trade_datetime(dt=datetime.datetime.now()):\r\n \"\"\"交易的真实日期\r\n\r\n Returns:\r\n [type] -- [description]\r\n \"\"\"\r\n\r\n #dt= datetime.datetime.now()\r\n\r\n if QA_util_if_trade(str(dt.date())) and dt.time() < datetime.time(15, 0, 0):\r\n return str(dt.date())\r\n else:\r\n return QA_util_get_real_date(str(dt.date()), trade_date_sse, 1)\r\n\r\n\r\ndef QA_util_get_order_datetime(dt):\r\n \"\"\"委托的真实日期\r\n\r\n Returns:\r\n [type] -- [description]\r\n \"\"\"\r\n\r\n #dt= datetime.datetime.now()\r\n dt = datetime.datetime.strptime(str(dt)[0:19], '%Y-%m-%d %H:%M:%S')\r\n\r\n if QA_util_if_trade(str(dt.date())) and dt.time() < datetime.time(15, 0, 0):\r\n return str(dt)\r\n else:\r\n # print('before')\r\n # print(QA_util_date_gap(str(dt.date()),1,'lt'))\r\n return '{} {}'.format(\r\n QA_util_date_gap(str(dt.date()),\r\n 1,\r\n 'lt'),\r\n dt.time()\r\n )\r\n\r\n\r\ndef QA_util_future_to_tradedatetime(real_datetime):\r\n \"\"\"输入是真实交易时间,返回按期货交易所规定的时间* 适用于tb/文华/博弈的转换\r\n\r\n Arguments:\r\n real_datetime {[type]} -- [description]\r\n\r\n Returns:\r\n [type] -- [description]\r\n \"\"\"\r\n if len(str(real_datetime)) >= 19:\r\n dt = datetime.datetime.strptime(\r\n str(real_datetime)[0:19],\r\n '%Y-%m-%d %H:%M:%S'\r\n )\r\n return dt if dt.time(\r\n ) < datetime.time(21,\r\n 0) else QA_util_get_next_datetime(dt,\r\n 1)\r\n elif len(str(real_datetime)) == 16:\r\n dt = datetime.datetime.strptime(\r\n str(real_datetime)[0:16],\r\n '%Y-%m-%d %H:%M'\r\n )\r\n return dt if dt.time(\r\n ) < datetime.time(21,\r\n 0) else QA_util_get_next_datetime(dt,\r\n 1)\r\n\r\n\r\ndef QA_util_future_to_realdatetime(trade_datetime):\r\n \"\"\"输入是交易所规定的时间,返回真实时间*适用于通达信的时间转换\r\n\r\n Arguments:\r\n trade_datetime {[type]} -- [description]\r\n\r\n Returns:\r\n [type] -- [description]\r\n \"\"\"\r\n if len(str(trade_datetime)) == 19:\r\n dt = datetime.datetime.strptime(\r\n str(trade_datetime)[0:19],\r\n '%Y-%m-%d %H:%M:%S'\r\n )\r\n return dt if dt.time(\r\n ) < datetime.time(21,\r\n 0) else QA_util_get_last_datetime(dt,\r\n 1)\r\n elif len(str(trade_datetime)) == 16:\r\n dt = datetime.datetime.strptime(\r\n str(trade_datetime)[0:16],\r\n '%Y-%m-%d %H:%M'\r\n )\r\n return dt if dt.time(\r\n ) < datetime.time(21,\r\n 0) else QA_util_get_last_datetime(dt,\r\n 1)\r\n" ]
[ [ "pandas.Timestamp" ] ]
yanfei-zhang-95/CT-Recon_
[ "bfe1d9d0b269129847fd8ecf0b654ebda6b15ec4" ]
[ "model.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\n\nimport tensorflow.keras as keras\n\nclass modelComponents():\n\n @staticmethod\n def tensorial_neural_block(x, new_shape1, new_shape2, new_shape3, scope, actv = tf.nn.sigmoid, isIN = True, isActv = True):\n\n\n with tf.variable_scope(scope):\n\n [N, H, W, C] = x.get_shape().as_list()\n var1 = tf.Variable(initial_value = tf.random_normal(shape=[N, H, C, new_shape3], mean=0, stddev=1), name = 'var1')\n x = tf.matmul(x, var1)\n x = tf.transpose(x, [0, 1, 3, 2])\n\n [N, H, W, C] = x.get_shape().as_list()\n var2 = tf.Variable(initial_value = tf.random_normal(shape=[N, H, C, new_shape2], mean=0, stddev=1), name = 'var2')\n x = tf.matmul(x, var2)\n x = tf.transpose(x, [0, 3, 2, 1])\n\n [N, H, W, C] = x.get_shape().as_list()\n var3 = tf.Variable(initial_value = tf.random_normal(shape=[N, H, C, new_shape1], mean=0, stddev=1), name = 'var3')\n x = tf.matmul(x, var3)\n x = tf.transpose(x, [0, 3, 1, 2])\n\n # x = tf.matmul(x, var1)\n # x = tf.tensordot(x, var2, axes = 2)\n # x = tf.tensordot(x, var3, axes = 3)\n\n if isIN:\n x = tf.contrib.layers.instance_norm(x)\n\n if isActv:\n x = actv(x)\n\n return x\n\n @staticmethod\n def conv2d_block(x, filters, scope, pad = 1, kernels = (3, 3), strides = (2, 2), actv = tf.nn.relu, isIN = True, isActv = True, padding = 'VALID'):\n\n x = tf.pad(x, paddings = ([0, 0], [pad, pad], [pad, pad], [0, 0]), mode = 'REFLECT')\n conv1 = tf.layers.conv2d(inputs = x, filters = filters, kernel_size = kernels, strides = strides, padding = padding,\n use_bias = True, name = scope)\n if isIN == True:\n conv1 = tf.contrib.layers.instance_norm(conv1)\n\n if isActv == True:\n conv1 = actv(conv1)\n return conv1\n\n @staticmethod\n def pooling2d_block(x, pool_size = (2, 2), strides = (2, 2)):\n pool1 = tf.layers.MaxPooling2D(pool_size = pool_size, strides = strides)(x)\n return pool1\n\n @staticmethod\n def convLSTM2D(x, filter, scope, seq = False, pad = 1, actv = tf.nn.leaky_relu, kernels = (3, 3), strides = (2, 2), isIN = True, isActv = True):\n\n [N, H, W, C] = x.get_shape().as_list()\n\n x = tf.transpose(x, (0, 3, 1, 2))\n x = x[:, :, :, :, tf.newaxis]\n\n x = tf.pad(x, [[0, 0], [0, 0], [pad, pad], [pad, pad], [0, 0]], mode = 'REFLECT')\n input_shape = x.get_shape().as_list()\n conv1 = keras.layers.ConvLSTM2D(filters = filter, kernel_size = kernels, strides = strides, data_format = 'channels_last'\n , input_shape = input_shape, padding = 'VALID', return_sequences = seq, name = scope)(x)\n if isIN is True:\n conv1 = tf.contrib.layers.instance_norm(conv1)\n\n if isActv is True:\n conv1 = actv(conv1)\n\n if seq is True:\n conv1 = tf.transpose(conv1, (0, 2, 3, 1, 4))\n conv1 = tf.reshape(conv1, shape = [N, H, W, C])\n\n return conv1\n\n @staticmethod\n def upsampling2d_block(x, filter, scope, kernels=(3, 3), strides=(2, 2), padding='SAME', actv=tf.nn.relu):\n\n up1 = tf.layers.conv2d_transpose(inputs = x, filters = filter, kernel_size = kernels, strides = strides, padding = padding,\n use_bias = True, name = scope+'_conv_transpose')\n up1 = tf.contrib.layers.instance_norm(up1)\n up1 = actv(up1)\n return up1\n\n @staticmethod\n def res2d_block(x, scope, filters):\n padded = tf.pad(x, paddings = ([0, 0], [1, 1], [1, 1], [0, 0]), mode = 'REFLECT')\n conv1 = tf.layers.conv2d(inputs = padded, filters = filters, kernel_size = (3, 3), strides = (1, 1), padding = 'VALID',\n name = scope+'conv1')\n conv1 = tf.contrib.layers.instance_norm(conv1)\n conv1 = tf.nn.relu(conv1)\n\n conv1 = tf.pad(conv1, paddings = ([0, 0], 
[1, 1], [1, 1], [0, 0]), mode = 'REFLECT')\n conv2 = tf.layers.conv2d(inputs = conv1, filters = filters, kernel_size = (3, 3), strides = (1, 1), padding = 'VALID',\n name = scope+'conv2')\n conv2 = tf.contrib.layers.instance_norm(conv2)\n\n return x + conv2\n\n @staticmethod\n def dense_block(x, layer_filters, final_filters, dense_layers, scope, actv = tf.nn.relu, final_actv = tf.nn.relu, reuse = False):\n comp = modelComponents()\n\n pre_input = x\n # pre_input = tf.nn.relu(pre_input)\n # pre_input = tf.contrib.layers.instance_norm(pre_input)\n\n with tf.variable_scope(scope, reuse = reuse):\n for i in range(dense_layers):\n\n dense = comp.conv2d_block(x=pre_input, filters=layer_filters, strides=(1, 1), actv = actv, scope = 'dense%i'%i)\n pre_input = tf.concat([pre_input, dense], axis=-1)\n\n output = comp.conv2d_block(x=pre_input, filters=final_filters, strides=(1, 1), actv = final_actv, scope='output')\n\n return output\n\n @staticmethod\n def gaussian_kernel(size=3, sigma=1.5):\n x_points = np.arange(-(size - 1) // 2, (size - 1) // 2 + 1, 1)\n y_points = x_points[::-1]\n xs, ys = np.meshgrid(x_points, y_points)\n kernel = np.exp(-(xs ** 2 + ys ** 2) / (2 * sigma ** 2)) / (2 * np.pi * sigma ** 2)\n return kernel / kernel.sum()\n\n @staticmethod\n def laplacian_kernel():\n return tf.constant([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])\n\n @staticmethod\n def matlab_style_gauss2D(shape=(3,3),sigma=0.5):\n\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return tf.convert_to_tensor(h, dtype = tf.float32)\n\n @staticmethod\n def filter2(x, kernel, mode='SAME'):\n return tf.nn.conv2d(x, tf.image.rot90(kernel, 2), strides = [1, 1, 1, 1], padding=mode)\n\nclass modelStructure():\n\n @staticmethod\n def dualAttModule(x, param_h, param_v, scope, reuse = False):\n\n param_softmax_v = param_v\n param_softmax_h = param_h\n\n [N, H, W, C] = x.get_shape().as_list()\n\n with tf.variable_scope('%s_vertical_att'%scope, reuse = reuse):\n att_v = tf.layers.conv2d(x, kernel_size = (1, 1), filters = C, name = 'conv_att_v')\n att_v_reshaped = tf.reshape(att_v, shape = [N, H, W, C], name = 'conv_att_v_reshaped')\n\n att_v_reshaped = att_v_reshaped/param_softmax_v\n att_v_softmax = tf.nn.softmax(logits = att_v_reshaped, axis = 1, name = 'conv_att_v_softmax')\n\n att_v_softmax_max = tf.reduce_max(att_v_softmax, axis = 1, name = 'conv_att_v_softmax_max')\n\n att_v_softmax_norm = att_v_softmax/att_v_softmax_max\n\n x = x * att_v_softmax_norm\n x_reshaped = tf.reshape(tensor=x, shape=[N, H, W, C])\n\n with tf.variable_scope('%s_horizontal_att'%scope, reuse = reuse):\n att_h = tf.layers.conv2d(x_reshaped, kernel_size = (1, 1), filters = C, name = 'conv_att_h')\n\n att_h_flatten = tf.reshape(att_h, shape = [N, H*W, C], name = 'conv_att_h_flatten')\n att_h_flatten = att_h_flatten/param_softmax_h\n\n att_h_flatten_softmax = tf.nn.softmax(logits = att_h_flatten, axis = -1, name = 'conv_att_h_flatten_softmax')\n att_h_flatten_softmax_max = tf.reduce_max(att_h_flatten_softmax, axis = -1, name = 'conv_att_h_flatten_softmax_max')\n att_h_flatten_softmax_max = tf.reshape(att_h_flatten_softmax_max, shape = [N, H*W, 1])\n\n att_h_softmax_norm = att_h_flatten_softmax/att_h_flatten_softmax_max\n att_h_softmax_norm_reshaped = tf.reshape(tensor=att_h_softmax_norm, shape=[N, H, W, C])\n\n att_h = att_h * att_h_softmax_norm_reshaped\n\n att_reshaped = 
tf.reshape(tensor=att_h, shape=[N, H, W, C])\n\n return att_reshaped\n\n @staticmethod\n def dense_net(x, scope, final_filter = 1, reuse = False):\n\n comp = modelComponents()\n structure = modelStructure()\n\n with tf.variable_scope(scope, reuse = reuse):\n dense1 = comp.dense_block(x, layer_filters = 10, final_filters = 32, dense_layers = 20, scope = 'dense_net1', reuse = reuse)\n dense1 = comp.conv2d_block(dense1, filters = 16, pad = 1, kernels = (3, 3), strides = (1, 1), scope = 'dense_net1_compress', actv = tf.nn.leaky_relu)\n\n dense2 = comp.dense_block(dense1, layer_filters=10, final_filters = 64, dense_layers = 20, scope = 'dense_net2', reuse = reuse)\n dense2 = comp.conv2d_block(dense2, filters=32, pad=1, kernels=(3, 3), strides=(1, 1), scope='dense_net2_compress', actv = tf.nn.leaky_relu)\n\n dense3 = comp.dense_block(dense2, layer_filters=10, final_filters = 128, dense_layers = 20, scope = 'dense_net3', reuse = reuse)\n dense3 = comp.conv2d_block(dense3, filters=64, pad=1, kernels=(3, 3), strides=(1, 1), scope='dense_net3_compress', actv = tf.nn.leaky_relu)\n\n dense6 = comp.dense_block(dense3, layer_filters=5, final_filters = 1024, dense_layers = 20, scope = 'dense_net6', reuse = reuse)\n dense6 = comp.conv2d_block(dense6, filters=final_filter, pad=1, kernels=(3, 3), strides=(1, 1), scope='dense_net6_compress', isIN = False, isActv = False)\n # dense6 = comp.pooling2d_block(dense6) #h/32, w/32\n\n return tf.nn.tanh(dense6)\n\n @staticmethod\n def unet_generator(x, scope, final_actv = None, reuse = False):\n\n comp = modelComponents()\n\n with tf.variable_scope(scope, reuse = reuse):\n conv1_1 = comp.conv2d_block(x, filters=64, pad = 3, kernels = (7, 7), strides = (1, 1), scope = 'conv1_1') # 256\n conv2_1 = comp.conv2d_block(conv1_1, filters = 64, pad = 1, kernels = (3, 3), scope = 'conv2_1') # 128\n conv3_1 = comp.conv2d_block(conv2_1, filters = 256, pad = 1, kernels = (3, 3), scope = 'conv3_1') # 64\n conv4_1 = comp.conv2d_block(conv3_1, filters = 512, pad = 1, kernels = (3, 3), scope = 'conv4_1') # 32\n\n res = conv4_1\n for i in range(1, 10):\n res = comp.res2d_block(res, scope = 'res_%i'%i, filters = 512)\n\n # Conv_transpose\n res = tf.concat([res, conv4_1], axis = -1)\n deconv1_1 = comp.upsampling2d_block(res, filter=256, scope='deconv1_1', actv = tf.nn.leaky_relu) # 64\n deconv1_1 = tf.concat([deconv1_1, conv3_1], axis = -1)\n deconv2_1 = comp.upsampling2d_block(deconv1_1, filter=128, scope='deconv2_1', actv = tf.nn.leaky_relu) # 128\n deconv2_1 = tf.concat([deconv2_1, conv2_1], axis = -1)\n deconv3_1 = comp.upsampling2d_block(deconv2_1, filter=64, scope='deconv3_1', actv = tf.nn.leaky_relu) # 256\n deconv3_1 = tf.concat([deconv3_1, conv1_1], axis = -1)\n\n # large_output = comp.tensorial_neural_block(x = deconv2_1, new_shape1 = 256, new_shape2 = 256, new_shape3 = 1, scope = 'large_output', isIN = False, isActv = False)\n large_output = comp.conv2d_block(deconv3_1, filters = 1, pad = 3, kernels = (7, 7), strides = (1, 1), scope = 'large_output', isIN = False, isActv = False)\n\n if final_actv is not None:\n large_output = final_actv(large_output)\n\n return large_output\n\n @staticmethod\n def generator(x, scope, final_layer = True, reuse = False):\n\n comp = modelComponents()\n\n with tf.variable_scope(scope, reuse = reuse):\n conv1_1 = comp.conv2d_block(x, filters=64, pad = 3, kernels = (7, 7), strides = (1, 1), scope = 'conv1_1')\n conv2_1 = comp.conv2d_block(conv1_1, filters = 64, pad = 1, kernels = (3, 3), scope = 'conv2_1')\n conv3_1 = comp.conv2d_block(conv2_1, 
filters = 256, pad = 1, kernels = (3, 3), scope = 'conv3_1')\n\n res = conv3_1\n for i in range(1, 10):\n res = comp.res2d_block(res, scope = 'res_%i'%i, filters = 256)\n\n # Conv_transpose\n deconv1_1 = comp.upsampling2d_block(res, filter=128, scope='deconv1_1', actv = tf.nn.leaky_relu)\n deconv2_1 = comp.upsampling2d_block(deconv1_1, filter=64, scope='deconv2_1', actv = tf.nn.leaky_relu)\n large_output = comp.conv2d_block(deconv2_1, filters=1, pad=3, kernels=(7, 7), strides=(1, 1), scope='large_output', isIN=False, isActv=False)\n\n\n if final_layer:\n large_output_after = tf.nn.tanh(large_output)\n\n return large_output_after\n\n @staticmethod\n def discriminator(x, scope, reuse = False):\n\n comp = modelComponents()\n with tf.variable_scope(scope, reuse=reuse):\n dis1 = comp.conv2d_block(x, filters=8, pad=1, kernels=(3, 3), strides=(2, 2), scope='dis1', isIN=False, actv=tf.nn.leaky_relu)\n dis2 = comp.conv2d_block(dis1, filters=32, pad=1, kernels=(3, 3), strides=(2, 2), scope='dis2', actv=tf.nn.leaky_relu)\n dis3 = comp.conv2d_block(dis2, filters=64, pad=1, kernels=(3, 3), strides=(2, 2), scope='dis3', actv=tf.nn.leaky_relu)\n dis4 = comp.conv2d_block(dis3, filters=1, pad=1, kernels=(3, 3), strides=(2, 2), scope='dis4', isIN=False, isActv=False)\n\n output = tf.nn.sigmoid(dis4)\n\n return output\n\n @staticmethod\n def feature_matching_discriminator(x, scope, reuse = False):\n\n comp = modelComponents()\n with tf.variable_scope(scope, reuse=reuse):\n dis1 = comp.conv2d_block(x, filters=8, pad=1, kernels=(3, 3), strides=(2, 2), scope='dis1', isIN=False, actv=tf.nn.leaky_relu)\n dis2 = comp.conv2d_block(dis1, filters=32, pad=1, kernels=(3, 3), strides=(2, 2), scope='dis2', actv=tf.nn.leaky_relu)\n dis3 = comp.conv2d_block(dis2, filters=64, pad=1, kernels=(3, 3), strides=(2, 2), scope='dis3', actv=tf.nn.leaky_relu)\n dis4 = comp.conv2d_block(dis3, filters=1, pad=1, kernels=(3, 3), strides=(2, 2), scope='dis4', isIN=False, isActv=False)\n\n output = tf.nn.sigmoid(dis4)\n\n return output, [dis1, dis2, dis3, dis4]\n\n @staticmethod\n def GAN_Loss(D_real, D_fake):\n D_loss = tf.reduce_mean(tf.square(D_real - 1.)) + tf.reduce_mean(tf.square(D_fake))\n G_loss = tf.reduce_mean(tf.square(D_fake - 1.))\n return D_loss, G_loss\n\n @staticmethod\n def abs_loss(true, pred, mask = None):\n if mask is not None:\n return tf.reduce_sum(mask*true-mask*pred)/tf.reduce_sum(mask)\n else:\n return tf.losses.absolute_difference(true, pred)\n\n @staticmethod\n # Laplician of Gaussian Losses that exerts the importances of Boundary\n def LoG_Loss(gt, gen, mask = None, delta = 0.01, abs = False):\n comp = modelComponents()\n structure = modelStructure()\n\n # Make Gaussian Kernel with desired specs.\n gauss_kernel = comp.gaussian_kernel()\n laplace_kernel = comp.laplacian_kernel()\n\n # Expand dimensions of `gauss_kernel` for `tf.nn.conv3d` signature.\n gauss_kernel_new = gauss_kernel[:, :, tf.newaxis, tf.newaxis]\n laplace_kernel_new = laplace_kernel[:, :, tf.newaxis, tf.newaxis]\n\n # Convolve.\n gt = tf.nn.conv2d(gt, gauss_kernel_new, strides=[1, 1, 1, 1], padding=\"SAME\")\n gt = tf.nn.conv2d(gt, laplace_kernel_new, strides=[1, 1, 1, 1], padding=\"SAME\")\n\n gen = tf.nn.conv2d(gen, gauss_kernel_new, strides=[1, 1, 1, 1], padding=\"SAME\")\n gen = tf.nn.conv2d(gen, laplace_kernel_new, strides=[1, 1, 1, 1], padding=\"SAME\")\n\n # Abs loss\n if abs is True:\n if mask is not None:\n return structure.abs_loss(gt, gen, mask)\n else:\n return structure.abs_loss(gt, gen)\n\n @staticmethod\n def 
feature_matching_loss(fea_real, fea_fake, delta = 0.01):\n structure = modelStructure()\n loss = 0\n for i in range(len(fea_fake)):\n loss += tf.reduce_mean(structure.abs_loss(fea_fake[i], fea_real[i]))\n return loss\n\n @staticmethod\n def huber(true, pred, delta=0.01):\n loss = tf.where(tf.abs(true - pred) < delta, 0.5 * ((true - pred) ** 2), delta * tf.abs(true - pred) - 0.5 * (delta ** 2))\n return tf.reduce_sum(loss)\n\n @staticmethod\n def metric_func(im1, im2):\n\n comp = modelComponents()\n\n k1 = 0.01\n k2 = 0.03\n win_size = 11\n L = 1\n\n [N, H, W, C] = im1.get_shape().as_list()\n # im1 = tf.reshape(im1, [H, W])\n # im2 = tf.reshape(im2, [H, W])\n #\n # M, N = im1.shape\n C1 = (k1 * L) ** 2\n C2 = (k2 * L) ** 2\n window = comp.matlab_style_gauss2D(shape=(win_size, win_size), sigma=1.5)\n window = window / tf.reduce_sum(tf.reduce_sum(window))\n\n window = window[:, :, tf.newaxis, tf.newaxis]\n\n if im1.dtype == tf.uint8:\n im1 = tf.double(im1)\n if im2.dtype == tf.uint8:\n im2 = tf.double(im2)\n\n mu1 = comp.filter2(im1, window, 'VALID')\n mu2 = comp.filter2(im2, window, 'VALID')\n mu1_sq = mu1 * mu1\n mu2_sq = mu2 * mu2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = comp.filter2(im1 * im1, window, 'VALID') - mu1_sq\n sigma2_sq = comp.filter2(im2 * im2, window, 'VALID') - mu2_sq\n sigmal2 = comp.filter2(im1 * im2, window, 'VALID') - mu1_mu2\n\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigmal2 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))\n\n return tf.reduce_mean(tf.reduce_mean(ssim_map))\n\n\n @staticmethod\n def basic_model(X):\n comp = modelComponents()\n structure = modelStructure()\n\n y_basic = structure.generator(x = X, scope = 'gen', reuse = False)\n basic_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'gen')\n\n return y_basic, basic_var\n\n @staticmethod\n def structured_model(X, y, body_inputs, true_body, vessel_inputs, true_vessel, y_basic, body_mask, vessel_mask, old_window, body_window, vessel_window):\n comp = modelComponents()\n structure = modelStructure()\n\n [N, H, W, C] = X.get_shape().as_list()\n\n with tf.variable_scope('model'):\n\n # Prepare Training Materials\n pred_vessel_basic = y_basic * (-vessel_mask + 1) # -1~1\n pred_body_basic = y_basic * body_mask #-1~1\n background = y_basic - pred_body_basic - pred_vessel_basic #-1~1\n\n # Refined Vessel & Refined Body\n refined_vessel = structure.unet_generator(vessel_inputs, scope = 'further_refinement_vessel', final_actv = tf.nn.tanh, reuse = False) # -1~1\n # refined_body = structure.dense_net(vessel_inputs, final_filter = 1, scope = 'further_refinement_vessel', reuse = False) # -1~1\n refined_body = structure.dense_net(body_inputs, final_filter = 1, scope = 'further_refinement_body', reuse = False) # -1~1\n\n # Adjust them to the original window [-1024, 3071] for comparison\n refined_body_adj = (refined_body+1)/2*(body_window[1] - body_window[0]) + body_window[0]\n refined_body_adj = (refined_body_adj - old_window[0])/(old_window[1] - old_window[0]) *2 - 1\n\n refined_vessel_adj = (refined_vessel+1)/2*(vessel_window[1] - vessel_window[0]) + vessel_window[0]\n refined_vessel_adj = (refined_vessel_adj - old_window[0])/(old_window[1] - old_window[0]) *2 - 1\n\n # Final pred after fusion\n final_pred = refined_body_adj * body_mask + refined_vessel_adj*(-vessel_mask + 1) + background # -1~1\n\n # Loss Function\n Loss_vessel_abs = tf.losses.absolute_difference(true_vessel*(1-vessel_mask), refined_vessel*(1-vessel_mask)) # -1~1 and -1~1\n Loss_body_abs = 
tf.losses.absolute_difference(true_body*body_mask, refined_body*body_mask) # -1~1 and -1~1\n\n # Loss_vessel_huber = structure.huber(true_vessel*(1-vessel_mask), refined_vessel*(1-vessel_mask))\n # Loss_body_huber = structure.huber(true_body*body_mask, refined_body*body_mask)\n\n Loss_img_ssim = - structure.metric_func(y, final_pred) # -1~1 and -1~1\n\n Losses = Loss_vessel_abs + Loss_body_abs + Loss_img_ssim\n # Losses = Loss_vessel_abs + Loss_body_abs\n\n var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'model')\n\n return Losses, var, final_pred, y_basic, refined_body*body_mask, true_body*body_mask" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.losses.absolute_difference", "tensorflow.image.rot90", "tensorflow.layers.conv2d_transpose", "tensorflow.pad", "tensorflow.keras.layers.ConvLSTM2D", "numpy.exp", "tensorflow.nn.conv2d", "tensorflow.get_collection", "numpy.arange", "numpy.finfo", "tensorflow.square", "tensorflow.layers.conv2d", "tensorflow.matmul", "tensorflow.nn.sigmoid", "tensorflow.double", "tensorflow.nn.tanh", "tensorflow.contrib.layers.instance_norm", "numpy.meshgrid", "tensorflow.layers.MaxPooling2D", "tensorflow.nn.relu", "tensorflow.reduce_max", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.abs", "tensorflow.random_normal" ] ]
my-isu-research/CutMix-PyTorch
[ "00870a196a39c6061c97972d797d34886589439d" ]
[ "extras.py" ]
[ "import torchvision.transforms as transforms\nimport os\nimport torchvision.datasets as datasets\nimport torch.nn as nn\n\ndef get_base_transform(img_size = 32,\n mean = [x / 255.0 for x in [125.3, 123.0, 113.9]],\n std = [x / 255.0 for x in [63.0, 62.1, 66.7]]):\n\n normalize = transforms.Normalize(mean, std)\n\n transform_train = transforms.Compose([\n transforms.RandomCrop(img_size, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n\n transform_test = transforms.Compose([\n transforms.RandomCrop(img_size, padding=4),\n transforms.ToTensor(),\n normalize\n ])\n\n return transform_train, transform_test\n\ndef get_train_test_dataset(dataset, train_transform, test_transform):\n trainset = datasets.ImageFolder(os.path.join(dataset, 'train'), transform=train_transform)\n testset = datasets.ImageFolder(os.path.join(dataset, 'test'), transform=test_transform)\n\n return trainset, testset\n\ndef update_resnet18_no_of_classes(resnet18_model, no_of_classes):\n num_ftrs = resnet18_model.linear.in_features\n resnet18_model.linear = nn.Linear(num_ftrs, no_of_classes)\n\n return resnet18_model\n" ]
[ [ "torch.nn.Linear" ] ]
j-zimmermann/pyABC
[ "fde3f97978297ad7a09528d1e18ff3580c91e8d9" ]
[ "pyabc/sampler/redis_eps/cli.py" ]
[ "import sys\nimport socket\nimport signal\nfrom redis import StrictRedis\nimport pickle\nimport os\nimport cloudpickle\nfrom time import time\nimport click\nfrom .redis_logging import logger\nfrom .cmd import (N_EVAL, N_ACC, N_REQ, ALL_ACCEPTED,\n N_WORKER, SSA, QUEUE, START, STOP,\n MSG, BATCH_SIZE)\nfrom multiprocessing import Pool\nimport numpy as np\nimport random\n\n\nTIMES = {\"s\": 1,\n \"m\": 60,\n \"h\": 3600,\n \"d\": 24*3600}\n\n\ndef runtime_parse(s):\n unit = TIMES[s[-1].lower()]\n nr = float(s[:-1])\n return unit * nr\n\n\nclass KillHandler:\n def __init__(self):\n self.killed = False\n self.exit = True\n signal.signal(signal.SIGTERM, self.handle)\n signal.signal(signal.SIGINT, self.handle)\n\n def handle(self, *args):\n self.killed = True\n if self.exit:\n sys.exit(0)\n\n\ndef work_on_population(redis: StrictRedis,\n start_time: int,\n max_runtime_s: int,\n kill_handler: KillHandler):\n \"\"\"\n Here the actual sampling happens.\n \"\"\"\n\n # set timers\n population_start_time = time()\n cumulative_simulation_time = 0\n\n # read from pipeline\n pipeline = redis.pipeline()\n # extract bytes\n ssa_b, batch_size_b, all_accepted_b, n_req_b, n_acc_b \\\n = (pipeline.get(SSA).get(BATCH_SIZE)\n .get(ALL_ACCEPTED).get(N_REQ).get(N_ACC).execute())\n\n if ssa_b is None:\n return\n\n kill_handler.exit = False\n\n if n_acc_b is None:\n return\n\n # convert from bytes\n simulate_one, sample_factory = pickle.loads(ssa_b)\n batch_size = int(batch_size_b.decode())\n all_accepted = bool(int(all_accepted_b.decode()))\n n_req = int(n_req_b.decode())\n\n # notify sign up as worker\n n_worker = redis.incr(N_WORKER)\n logger.info(\n f\"Begin population, batch size {batch_size}. \"\n f\"I am worker {n_worker}\")\n\n # counter for number of simulations\n internal_counter = 0\n\n # create empty sample\n sample = sample_factory()\n\n # loop until no more particles required\n while int(redis.get(N_ACC).decode()) < n_req \\\n and (not all_accepted or int(redis.get(N_EVAL).decode()) < n_req):\n if kill_handler.killed:\n logger.info(\n f\"Worker {n_worker} received stop signal. \"\n f\"Terminating in the middle of a population \"\n f\"after {internal_counter} samples.\")\n # notify quit\n redis.decr(N_WORKER)\n sys.exit(0)\n\n # check whether time's up\n current_runtime = time() - start_time\n if current_runtime > max_runtime_s:\n logger.info(\n f\"Worker {n_worker} stops during population because \"\n f\"runtime {current_runtime} exceeds \"\n f\"max runtime {max_runtime_s}\")\n # notify quit\n redis.decr(N_WORKER)\n return\n\n # increase global number of evaluations counter\n particle_max_id = redis.incr(N_EVAL, batch_size)\n\n # timer for current simulation until batch_size acceptances\n this_sim_start = time()\n # collect accepted particles\n accepted_samples = []\n\n # make batch_size attempts\n for n_batched in range(batch_size):\n # increase evaluation counter\n internal_counter += 1\n try:\n # simulate\n new_sim = simulate_one()\n # append to current sample\n sample.append(new_sim)\n # check for acceptance\n if new_sim.accepted:\n # the order of the IDs is reversed, but this does not\n # matter. Important is only that the IDs are specified\n # before the simulation starts\n\n # append to accepted list\n accepted_samples.append(\n cloudpickle.dumps(\n (particle_max_id - n_batched, sample)))\n # initialize new sample\n sample = sample_factory()\n except Exception as e:\n logger.warning(f\"Redis worker number {n_worker} failed. 
\"\n f\"Error message is: {e}\")\n # initialize new sample to be sure\n sample = sample_factory()\n\n # update total simulation-specific time\n cumulative_simulation_time += time() - this_sim_start\n\n # push to pipeline if at least one sample got accepted\n if len(accepted_samples) > 0:\n # new pipeline\n pipeline = redis.pipeline()\n # update particles counter\n pipeline.incr(N_ACC, len(accepted_samples))\n # note: samples are appended 1-by-1\n pipeline.rpush(QUEUE, *accepted_samples)\n # execute all commands\n pipeline.execute()\n\n # end of sampling loop\n\n # notify quit\n redis.decr(N_WORKER)\n kill_handler.exit = True\n population_total_time = time() - population_start_time\n logger.info(\n f\"Finished population, did {internal_counter} samples. \"\n f\"Simulation time: {cumulative_simulation_time:.2f}s, \"\n f\"total time {population_total_time:.2f}.\")\n\n\[email protected](help=\"Evaluation parallel redis sampler for pyABC.\")\[email protected]('--host', default=\"localhost\", help='Redis host.')\[email protected]('--port', default=6379, type=int, help='Redis port.')\[email protected]('--runtime', type=str, default=\"2h\",\n help='Max worker runtime if the form <NR><UNIT>, '\n 'where <NR> is any number and <UNIT> can be s, '\n '(S,) m, (M,) '\n 'h, (H,) d, (D) for seconds, minutes, hours and days. '\n 'E.g. for 12 hours you would pass --runtime=12h, for half '\n 'a day you could do 0.5d.')\[email protected]('--processes', type=int, default=1, help=\"The number of worker \"\n \"processes to start\")\ndef work(host=\"localhost\", port=6379, runtime=\"2h\", processes=1):\n \"\"\"\n Corresponds to the entry point abc-redis-worker.\n \"\"\"\n if processes == 1:\n # start a single process right here, not within pool\n # this handles the problem of starting a daemon process within a\n # daemon process\n return _work(host, port, runtime)\n\n with Pool(processes) as pool:\n res = pool.starmap(_work, [(host, port, runtime)] * processes)\n return res\n\n\ndef _work(host=\"localhost\", port=6379, runtime=\"2h\"):\n np.random.seed()\n random.seed()\n\n kill_handler = KillHandler()\n\n start_time = time()\n max_runtime_s = runtime_parse(runtime)\n logger.info(\n f\"Start redis worker. Max run time {max_runtime_s}s, \"\n f\"HOST={socket.gethostname()}, PID={os.getpid()}\")\n redis = StrictRedis(host=host, port=port)\n\n p = redis.pubsub()\n p.subscribe(MSG)\n listener = p.listen()\n for msg in listener:\n try:\n data = msg[\"data\"].decode()\n except AttributeError:\n data = msg[\"data\"]\n\n # check if it is int to (first iteration) run at least once\n if data == START or isinstance(data, int):\n work_on_population(redis, start_time, max_runtime_s, kill_handler)\n\n if data == STOP:\n logger.info(\"Received stop signal. Shutdown redis worker.\")\n return\n\n elapsed_time = time() - start_time\n if elapsed_time > max_runtime_s:\n logger.info(\n \"Shutdown redis worker. Max runtime {}s reached\"\n .format(max_runtime_s))\n return\n\n\[email protected](help=\"ABC Redis cluster manager. \"\n \"The command can be 'info' or 'stop'. \"\n \"For 'stop' the workers are shut down cleanly \"\n \"after the current population. \"\n \"For 'info' you'll see how many workers are connected, \"\n \"how many evaluations the current population has, and \"\n \"how many particles are still missing. \"\n \"For 'reset-workers', the worker count will be resetted to\"\n \"zero. This does not cancel the sampling. 
This is useful \"\n \"if workers were unexpectedly killed.\")\[email protected]('--host', default=\"localhost\", help='Redis host.')\[email protected]('--port', default=6379, type=int, help='Redis port.')\[email protected]('command', type=str)\ndef manage(command, host=\"localhost\", port=6379):\n \"\"\"\n Corresponds to the entry point abc-redis-manager.\n \"\"\"\n return _manage(command, host=host, port=port)\n\n\ndef _manage(command, host=\"localhost\", port=6379):\n redis = StrictRedis(host=host, port=port)\n if command == \"info\":\n pipe = redis.pipeline()\n pipe.get(N_WORKER)\n pipe.get(N_EVAL)\n pipe.get(N_ACC)\n pipe.get(N_REQ)\n res = pipe.execute()\n res = [r.decode() if r is not None else r for r in res]\n print(\"Workers={} Evaluations={} Acceptances={}/{}\".format(*res))\n elif command == \"stop\":\n redis.publish(MSG, STOP)\n elif command == \"reset-workers\":\n redis.set(N_WORKER, 0)\n else:\n print(\"Unknown command:\", command)\n" ]
[ [ "numpy.random.seed" ] ]
WenmuZhou/DABNet_Paddle
[ "b551085009faf07b351df98ecb625e1f82ddd50e" ]
[ "dataset/cityscapes.py" ]
[ "import os.path as osp\nimport numpy as np\nimport random\nimport cv2\nfrom paddle.io import Dataset\nimport pickle\n\n\nclass CityscapesDataSet(Dataset):\n \"\"\" \n CityscapesDataSet is employed to load train set\n Args:\n root: the Cityscapes dataset path, \n cityscapes\n ├── gtFine\n ├── leftImg8bit\n list_path: cityscapes_train_list.txt, include partial path\n mean: bgr_mean (73.15835921, 82.90891754, 72.39239876)\n\n \"\"\"\n\n def __init__(self, root='', list_path='', max_iters=None, crop_size=(512, 1024), mean=(128, 128, 128), scale=True,\n mirror=True, ignore_label=255):\n super().__init__()\n self.root = root\n self.list_path = list_path\n self.crop_h, self.crop_w = crop_size\n self.scale = scale\n self.ignore_label = ignore_label\n self.mean = mean\n self.is_mirror = mirror\n self.img_ids = [i_id.strip() for i_id in open(list_path)]\n if not max_iters == None:\n self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))\n self.files = []\n\n # for split in [\"train\", \"trainval\", \"val\"]:\n for name in self.img_ids:\n img_file = osp.join(self.root, name.split()[0])\n # print(img_file)\n label_file = osp.join(self.root, name.split()[1])\n # print(label_file)\n self.files.append({\n \"img\": img_file,\n \"label\": label_file,\n \"name\": name\n })\n\n print(\"length of dataset: \", len(self.files))\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n image = cv2.imread(datafiles[\"img\"], cv2.IMREAD_COLOR)\n label = cv2.imread(datafiles[\"label\"], cv2.IMREAD_GRAYSCALE)\n size = image.shape\n name = datafiles[\"name\"]\n if self.scale:\n scale = [0.75, 1.0, 1.25, 1.5, 1.75, 2.0]\n f_scale = scale[random.randint(0, 5)]\n # f_scale = 0.5 + random.randint(0, 15) / 10.0 # random resize between 0.5 and 2\n image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)\n label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)\n\n image = np.asarray(image, np.float32)\n\n image -= self.mean\n # image = image.astype(np.float32) / 255.0\n image = image[:, :, ::-1] # change to RGB\n img_h, img_w = label.shape\n pad_h = max(self.crop_h - img_h, 0)\n pad_w = max(self.crop_w - img_w, 0)\n if pad_h > 0 or pad_w > 0:\n img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,\n pad_w, cv2.BORDER_CONSTANT,\n value=(0.0, 0.0, 0.0))\n label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,\n pad_w, cv2.BORDER_CONSTANT,\n value=(self.ignore_label,))\n else:\n img_pad, label_pad = image, label\n\n img_h, img_w = label_pad.shape\n h_off = random.randint(0, img_h - self.crop_h)\n w_off = random.randint(0, img_w - self.crop_w)\n # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);\n image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)\n label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)\n\n image = image.transpose((2, 0, 1)) # NHWC -> NCHW\n\n if self.is_mirror:\n flip = np.random.choice(2) * 2 - 1\n image = image[:, :, ::flip]\n label = label[:, ::flip]\n\n return image.copy(), label.copy(), np.array(size), name\n\n\nclass CityscapesValDataSet(Dataset):\n \"\"\" \n CityscapesDataSet is employed to load val set\n Args:\n root: the Cityscapes dataset path, \n cityscapes\n ├── gtFine\n ├── leftImg8bit\n list_path: cityscapes_val_list.txt, include partial path\n\n \"\"\"\n\n def __init__(self, root='', list_path='', f_scale=1, mean=(128, 128, 128), ignore_label=255):\n 
super().__init__()\n self.root = root\n self.list_path = list_path\n self.ignore_label = ignore_label\n self.mean = mean\n self.f_scale = f_scale\n self.img_ids = [i_id.strip() for i_id in open(list_path)]\n self.files = []\n for name in self.img_ids:\n img_file = osp.join(self.root, name.split()[0])\n # print(img_file)\n label_file = osp.join(self.root, name.split()[1])\n # print(label_file)\n image_name = osp.basename(name)\n self.files.append({\n \"img\": img_file,\n \"label\": label_file,\n \"name\": image_name\n })\n\n print(\"length of dataset: \", len(self.files))\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n image = cv2.imread(datafiles[\"img\"], cv2.IMREAD_COLOR)\n label = cv2.imread(datafiles[\"label\"], cv2.IMREAD_GRAYSCALE)\n\n size = image.shape\n name = datafiles[\"name\"]\n if self.f_scale != 1:\n image = cv2.resize(image, None, fx=self.f_scale, fy=self.f_scale, interpolation=cv2.INTER_LINEAR)\n label = cv2.resize(label, None, fx=self.f_scale, fy=self.f_scale, interpolation=cv2.INTER_NEAREST)\n\n image = np.asarray(image, np.float32)\n\n image -= self.mean\n # image = image.astype(np.float32) / 255.0\n image = image[:, :, ::-1] # change to RGB\n image = image.transpose((2, 0, 1)) # HWC -> CHW\n\n # print('image.shape:',image.shape)\n return image.copy(), label.copy(), np.array(size), name\n\n\nclass CityscapesTestDataSet(Dataset):\n \"\"\" \n CityscapesDataSet is employed to load test set\n Args:\n root: the Cityscapes dataset path,\n list_path: cityscapes_test_list.txt, include partial path\n\n \"\"\"\n\n def __init__(self, root='', list_path='', mean=(128, 128, 128), ignore_label=255):\n super().__init__()\n self.root = root\n self.list_path = list_path\n self.ignore_label = ignore_label\n self.mean = mean\n self.img_ids = [i_id.strip() for i_id in open(list_path)]\n self.files = []\n for name in self.img_ids:\n img_file = osp.join(self.root, name.split()[0])\n # print(img_file)\n image_name = osp.basename(name)\n # print(image_name)\n self.files.append({\n \"img\": img_file,\n \"name\": image_name\n })\n print(\"lenth of dataset: \", len(self.files))\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n\n image = cv2.imread(datafiles[\"img\"], cv2.IMREAD_COLOR)\n name = datafiles[\"name\"]\n image = np.asarray(image, np.float32)\n size = image.shape\n\n image -= self.mean\n # image = image.astype(np.float32) / 255.0\n image = image[:, :, ::-1] # change to RGB\n image = image.transpose((2, 0, 1)) # HWC -> CHW\n return image.copy(), np.array(size), name\n\n\nclass CityscapesTrainInform:\n \"\"\" To get statistical information about the train set, such as mean, std, class distribution.\n The class is employed for tackle class imbalance.\n \"\"\"\n\n def __init__(self, data_dir='', classes=19, inform_data_file=\"\", normVal=1.10):\n \"\"\"\n Args:\n data_dir: directory where the dataset is kept\n classes: number of classes in the dataset\n inform_data_file: location where cached file has to be stored\n normVal: normalization value, as defined in ERFNet paper\n \"\"\"\n self.data_dir = data_dir\n self.classes = classes\n self.classWeights = np.ones(self.classes, dtype=np.float32)\n self.normVal = normVal\n self.mean = np.zeros(3, dtype=np.float32)\n self.std = np.zeros(3, dtype=np.float32)\n self.inform_data_file = inform_data_file\n\n def compute_class_weights(self, histogram):\n \"\"\"to compute the class weights\n Args:\n histogram: 
distribution of class samples\n \"\"\"\n normHist = histogram / np.sum(histogram)\n for i in range(self.classes):\n self.classWeights[i] = 1 / (np.log(self.normVal + normHist[i]))\n\n def readWholeTrainSet(self, data_list, train_flag=True):\n \"\"\"to read the whole train set of current dataset.\n Args:\n fileName: train set file that stores the image locations\n trainStg: if processing training or validation data\n \n return: 0 if successful\n \"\"\"\n global_hist = np.zeros(self.classes, dtype=np.float32)\n\n no_files = 0\n min_val_al = 0\n max_val_al = 0\n for fileName in data_list:\n with open(fileName, 'r') as textFile:\n # with open(fileName, 'r') as textFile:\n for line in textFile:\n # we expect the text file to contain the data in following format\n # <RGB Image> <Label Image>\n line_arr = line.split()\n img_file = ((self.data_dir).strip() + '/' + line_arr[0].strip()).strip()\n label_file = ((self.data_dir).strip() + '/' + line_arr[1].strip()).strip()\n\n label_img = cv2.imread(label_file, 0)\n unique_values = np.unique(label_img)\n max_val = max(unique_values)\n min_val = min(unique_values)\n\n max_val_al = max(max_val, max_val_al)\n min_val_al = min(min_val, min_val_al)\n\n if train_flag == True:\n hist = np.histogram(label_img, self.classes, range=(0, 18))\n global_hist += hist[0]\n\n rgb_img = cv2.imread(img_file)\n self.mean[0] += np.mean(rgb_img[:, :, 0])\n self.mean[1] += np.mean(rgb_img[:, :, 1])\n self.mean[2] += np.mean(rgb_img[:, :, 2])\n\n self.std[0] += np.std(rgb_img[:, :, 0])\n self.std[1] += np.std(rgb_img[:, :, 1])\n self.std[2] += np.std(rgb_img[:, :, 2])\n\n else:\n print(\"we can only collect statistical information of train set, please check\")\n\n if max_val > (self.classes - 1) or min_val < 0:\n print('Labels can take value between 0 and number of classes.')\n print('Some problem with labels. Please check. label_set:', unique_values)\n print('Label Image ID: ' + label_file)\n no_files += 1\n\n # divide the mean and std values by the sample space size\n self.mean /= no_files\n self.std /= no_files\n\n # compute the class imbalance information\n self.compute_class_weights(global_hist)\n return 0\n\n def collectDataAndSave(self, data_list):\n \"\"\" To collect statistical information of train set and then save it.\n The file train.txt should be inside the data directory.\n \"\"\"\n print('Processing training data')\n return_val = self.readWholeTrainSet(data_list)\n\n print('Pickling data')\n if return_val == 0:\n data_dict = dict()\n data_dict['mean'] = self.mean\n data_dict['std'] = self.std\n data_dict['classWeights'] = self.classWeights\n pickle.dump(data_dict, open(self.inform_data_file, \"wb\"))\n return data_dict\n return None\n" ]
[ [ "numpy.log", "numpy.histogram", "numpy.random.choice", "numpy.asarray", "numpy.unique", "numpy.ones", "numpy.std", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
NCMlab/CogntiveTasksOnline
[ "02e2bdb2b54a96b609fd5658cac4323322f183f0" ]
[ "VSTMPsychopyFiles/VSTM_PassConfigFile.py" ]
[ "'''\nThis version of the task takes a config file when called along with a flag based\non whether to use a fixed dot location or not.\nThe config file creates three different lists of probe pos/neg orders and dot\nlocations for load levels 1 to 15.\n'''\n# https://docs.python.org/3/library/configparser.html\n\n\nfrom psychopy import locale_setup, gui, visual, core, data, event, logging\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nimport os # handy system and path functions\nimport sys # to get file system encoding\nimport random\nimport wx\n\n# Ensure that relative paths start from the same directory as this script\n_thisDir = os.path.dirname(os.path.abspath(__file__))#.decode(sys.getfilesystemencoding())\n# import parameters from a config file\nsys.path.append(os.path.join(_thisDir, '..','ConfigFiles'))\n\n\ncountDown = core.CountdownTimer()\n# Store info about the experiment session\n# #################\n# Store info about the experiment session\nexpName = u'VSTM' # from the Builder filename that created this script\ntask = 'Block'\nexpInfo = {u'session': u'01', u'Participant ID': u'9999999'}\n\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\nexpInfo['expName'] = expName\nif len(sys.argv) > 1:\n #tempFile.write(\"Entered if clause\\n\")\n #tempFile.write('%s\\n'%(sys.argv[2]))\n expInfo['Participant ID'] = sys.argv[1]\n #tempFile.write('%s\\n'%(sys.argv[1]))\n #tempFile.write('%s\\n'%(sys.argv[2]))\n\n PartDataFolder = sys.argv[2]\n LoadList = sys.argv[3].split(' ')\n LoadList = np.array(LoadList)\n LoadList = LoadList.astype(np.int)\n Tag = sys.argv[4]\n ConfigFile = sys.argv[5]\n # Make sure to convert this to a boolean for later decision making\n FixedLocations = bool(sys.argv[6])\nelse:\n dlg = gui.DlgFromDict(dictionary=expInfo)\n if dlg.OK == False:\n core.quit() # user pressed cancel\n DataFolder = \"../../data\"\n PartDataFolder = 'unorganized'\n OutDir = os.path.join(DataFolder, PartDataFolder)\n if not os.path.exists(OutDir):\n os.mkdir(OutDir)\n LoadList = np.array(range(1,6,1)) ### <<<<<<<<<<<<<<<<<<<\n LoadList = LoadList.astype(np.int)\n \n Tag = 'BehRun1'\n PartDataFolder = OutDir\n ConfigFile = 'VSTM_BehavOnline_Config'\n FixedLocations = True\n \nif FixedLocations: \n # Based on the tag passed, determine which run to use from the config file\n if Tag == 'MRIRun1':\n CurrentRun = 0\n elif Tag == 'MRIRun2':\n CurrentRun = 1\n elif Tag == 'BehRun1':\n CurrentRun = 2 \n # I can only see this situation occuring if there is a mistake running the \n # behavioral run and it needs to be repeated.\n else:\n CurrentRun = 2\n\n# Load up the config file\nprint(\"Loading up the config file: %s\"%(ConfigFile))\nStr = 'from %s import *'%(ConfigFile)\nexec(Str)\n\nprint(\"Fixed Locations: %r\"%(FixedLocations))\n\nGridSize = VSTM_GridSizeScale*VSTM_GridCount + 1 # The size of the grid for which the circles on on\nCircleSize = (GridSize*2)/VSTM_GridCount # The circle size so that they are all just touching\nTotalTrialTime = VSTM_StimOnTime + VSTM_RetOnTime + VSTM_ProbeOnTime + VSTM_ITITime\n\nOffSet = range(-GridSize+int(CircleSize/2),GridSize-int(CircleSize/2),int(CircleSize))\nMaskLocations = np.arange(0,1+VSTM_GridCount**2)\n\n\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\nfilename = os.path.join(PartDataFolder, '%s_%s_%s_%s_%s' % (expInfo['Participant ID'],expName, task, Tag, expInfo['date']))\n\n# #################\n\n\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\n#OutDir 
= '..' + os.sep + 'data' + os.sep + PartDataFolder + os.sep\n#filename = OutDir + '%s_%s_%s' % (expName, expInfo['Participant ID'], expInfo['date'])\n#\n\n\n# Setup the Window\nwin = visual.Window(\n size=(1200, 800), fullscr=True, screen=0,\n allowGUI=False, allowStencil=False,\n monitor='testMonitor', color=VSTM_BGColor, colorSpace='rgb',\n blendMode='avg', useFBO=True,\n units=VSTM_FontSizeUnits)\n \nexpInfo['date'] = data.getDateStr() # add a simple timestamp\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\n#OutDir = '..' + os.sep + '..' + os.sep + '..' + os.sep + 'data' + os.sep + PartDataFolder + os.sep\n\ndataFile = open(filename+'.csv', 'w')\n\n#filename = OutDir + '%s%s_%s_%s' % (expName, Tag, expInfo['Participant ID'], expInfo['date'])\nprint(filename)\n\n#OutDir = '..' + os.sep + '..' + os.sep + 'data' + os.sep + PartDataFolder + os.sep\n#filename = OutDir + '%s%s_%s_%s' % (expName, task, expInfo['Participant ID'], expInfo['date'])\n#print(filename)\n#dataFile = open(filename+'.csv', 'w')\ndataFile.write('Trial,Load,TrialStartTime,Resp,Corr,RT,CorrectRT,ProbeType,ProbeLoc,')\n# WHen writing out the data make sure to write out column names \nfor i in np.arange(max(LoadList)):\n dataFile.write('StimLoc%02d,'%(i+1))\ndataFile.write('\\n')\n\n# An ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=False, saveWideText=False,\n dataFileName=filename)\n \nthisResp = event.BuilderKeyResponse()\n# Circle\n# This is a single component that will be displayed on the screen multiple times while\n# changing arounds its position\n\ncircle = visual.Polygon(\n win=win, name='polygon',units='pix', \n edges=128, size=(CircleSize, CircleSize),\n ori=0, pos=(0, 0),\n lineWidth=1, lineColor='black', lineColorSpace='rgb',\n fillColor='black', fillColorSpace='rgb',\n opacity=1, depth=0.0, interpolate=True)\n \nProbeCircle = visual.Polygon(\n win=win, name='polygon',units='pix', \n edges=128, size=(CircleSize, CircleSize),\n ori=0, pos=(0, 0),\n lineWidth=1, lineColor=VSTM_ProbeColor, lineColorSpace='rgb',\n fillColor=VSTM_ProbeColor, fillColorSpace='rgb',\n opacity=1, depth=0.0, interpolate=True)\n\n# Cross hairs\nRedCross = visual.TextStim(win=win, name='RedCross',\n text='+',\n font='Times New Roman',\n units=VSTM_FontSizeUnits, pos=(0, 0), height=VSTM_FontSize, wrapWidth=None, ori=0, \n color='red', colorSpace='rgb', opacity=1,\n depth=-9.0);\nWhiteCross = visual.TextStim(win=win, name='RedCross',\n text='+',\n font='Times New Roman',\n units=VSTM_FontSizeUnits, pos=(0, 0), height=VSTM_FontSize, wrapWidth=None, ori=0, \n color='white', colorSpace='rgb', opacity=1,\n depth=-9.0);\nGreenCross = visual.TextStim(win=win, name='RedCross',\n text='+',\n font='Times New Roman',\n units=VSTM_FontSizeUnits, pos=(0, 0), height=VSTM_FontSize, wrapWidth=None, ori=0, \n color='green', colorSpace='rgb', opacity=1,\n depth=-9.0);\n \n# Instructions\ntextInstr1 = visual.TextStim(win=win, name='textInstr1',\n text=VSTM_Instructions, #Press [INDEX Finger] if the circle WAS in the set.\\nPress [MIDDLE Finger] if the circle was NOT in the set.\\n\\nTry to respond as quickly and as accurately as possible.\\n\\nPress [5] to begin.',\n font='Times New Roman',\n units=VSTM_FontSizeUnits, pos=(0, 0), height=VSTM_FontSize*0.75, wrapWidth=1200, ori=0, \n color=VSTM_FontColor, colorSpace='rgb', opacity=1,\n depth=0.0); \n \n# Initialize components for 
Routine \"Countdown\"\ntext3 = visual.TextStim(win=win, name='text3',\n text='3',\n font='Times New Roman',\n units=VSTM_FontSizeUnits, pos=(0, 0), height=VSTM_FontSize, wrapWidth=None, ori=0, \n color=VSTM_FontColor, colorSpace='rgb', opacity=1,\n depth=0.0);\ntext2 = visual.TextStim(win=win, name='text2',\n text='2',\n font='Times New Roman',\n units=VSTM_FontSizeUnits, pos=(0, 0), height=VSTM_FontSize, wrapWidth=None, ori=0, \n color=VSTM_FontColor, colorSpace='rgb', opacity=1,\n depth=-1.0);\ntext1 = visual.TextStim(win=win, name='text1',\n text='1',\n font='Times New Roman',\n units=VSTM_FontSizeUnits, pos=(0, 0), height=VSTM_FontSize, wrapWidth=None, ori=0, \n color=VSTM_FontColor, colorSpace='rgb', opacity=1,\n depth=-2.0);\n \ntextThankyou = visual.TextStim(win=win, name='textThankyou',\n text='Thank you for participating!',\n font='Times New Roman',\n units=VSTM_FontSizeUnits, pos=(0, 0), height=VSTM_FontSize, wrapWidth=None, ori=0, \n color=VSTM_FontColor, colorSpace='rgb', opacity=1,\n depth=0.0); \n\nimage = visual.ImageStim(\n win=win,\n name='image', \n image='../GUI/YesNoKeyboard.png', mask=None,\n ori=0, pos=(450, -300), size=(200,136),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0) \n\nRunningClock = core.Clock()\n\n# For each block change the selection list \n\n# Need instructions and wait\ntextInstr1.setAutoDraw(True)\n# Put the probe dot on the screen\nwin.flip()\n# Start the probe timer\n\nWaitingFlag = True\nwhile WaitingFlag is True:\n theseKeys = event.getKeys(keyList=['escape','5'])\n if 'escape' in theseKeys:\n core.quit()\n elif '5' in theseKeys:\n WaitingFlag = False\n textInstr1.setAutoDraw(False)\n else:\n pass \n\n# set up handler to look after randomisation of conditions etc\nBlocks = data.TrialHandler(nReps=VSTM_NumberOfBlocks, method='sequential', \n extraInfo=expInfo, originPath=-1,trialList=[None],\n seed=None, name='Blocks')\n\nRunningClock.reset()\n# ###########################\n# INTRO BlockCount\n# Need intro Blocks\n# Turn on the cross hair\nWhiteCross.setAutoDraw(True)\nwin.flip()\ncountDown.reset() \nWhiteCross.setAutoDraw(True)\ncountDown.add(VSTM_InterBlockTime)\nwhile countDown.getTime() > 0:\n pass\nwin.flip()\n# Turn on the countdown timer\nWhiteCross.setAutoDraw(False)\n# ###########################\n# BLOCK LOOP\nthisExp.addLoop(Blocks) # add the loop to the experiment\nthisBlock = Blocks.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)\nif thisBlock != None:\n for paramName in thisBlock.keys():\n exec(paramName + '= thisBlock.' + paramName)\n\nBlockCount = 0\nfor thisBlock in Blocks:\n currentLoop = Blocks\n # abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)\n if thisBlock != None:\n for paramName in thisBlock.keys():\n exec(paramName + '= thisBlock.' 
+ paramName)\n \n CurrentLoad = LoadList[BlockCount]\n # If the current load exceeds the limits of the config file then override the fixed Locations\n # flag\n try: \n AllLocations[CurrentLoad - 1]\n except:\n FixedLocations = False\n \n print(\"Current load is: %d\"%(CurrentLoad))\n countDown.reset() \n# Start the 321 count down to the start of a block of trials\n# Start the countdown timer and use this time to set up the trials\n text3.setAutoDraw(True)\n countDown.add(1)\n# prepare the trials\n trials = data.TrialHandler(nReps=VSTM_NTrialsPerBlock, method='sequential', \n extraInfo=expInfo, originPath=-1,trialList=[None],\n seed=None, name='trials')\n# Prepare the stimuli\n if FixedLocations:\n ProbeList = AllProbes[CurrentLoad][CurrentRun] \n else:\n # Make sure there are an equal number of probe pos and Neg\n ProbeList = np.concatenate((np.zeros(int(VSTM_NTrialsPerBlock/2)),np.ones(int(VSTM_NTrialsPerBlock/2))))\n # Shuffle the list\n ProbeList = ProbeList[np.random.permutation(VSTM_NTrialsPerBlock)]\n win.flip()\n while countDown.getTime() > 0:\n pass\n text3.setAutoDraw(False)\n text2.setAutoDraw(True)\n countDown.add(1)\n win.flip()\n while countDown.getTime() > 0:\n pass \n text2.setAutoDraw(False)\n text1.setAutoDraw(True)\n countDown.add(1)\n win.flip()\n while countDown.getTime() > 0:\n pass \n win.flip()\n text1.setAutoDraw(False)\n# TRIAL LOOP\n\n TrialCount = 0\n for thisTrial in trials:\n countDown.add(VSTM_StimOnTime)\n GreenCross.setAutoDraw(True)\n TrialStartTime = RunningClock.getTime()\n theseKeys = event.getKeys()\n if FixedLocations:\n print(\"Fixed Locations: %r\"%(FixedLocations))\n print(\"Working with fixed locations\")\n print(\"Current load is: %d\"%(CurrentLoad))\n print(\"Current Run is: %d\"%(CurrentRun))\n print(\"Current Trial is: %d\"%(TrialCount))\n \n Locations = AllLocations[CurrentLoad - 1][CurrentRun][TrialCount]\n else:\n Locations = np.random.permutation(VSTM_GridCount**2)[0:CurrentLoad]\n print(\"This trial is %s\"%(thisTrial))\n # Create the probe Locations \n PosProbeLocation = Locations[np.random.permutation(CurrentLoad)[0]]\n NotLocations = np.arange(0,VSTM_GridCount**2)\n NotLocations = [x for x in NotLocations if x not in Locations]\n #NegProbeLocation = np.random.randint(0,len(NotLocations),1)[0]\n NegProbeLocation = NotLocations[np.random.permutation(len(NotLocations))[0]]\n\n # Make sure the Locations does not include the central location because \n # the cross hair is to remain on the screen\n count = 0\n for y_offset in OffSet:\n for x_offset in OffSet:\n for stim in [circle]:\n stim.pos = [x_offset, y_offset]\n if (count in Locations):\n stim.draw()\n count += 1\n \n CurrentTime = RunningClock.getTime()\n\n # Put the circles on the screen\n win.flip()\n \n # Prepare the mask dots\n count = 0\n for y_offset in OffSet:\n for x_offset in OffSet:\n for stim in [circle]:\n stim.pos = [x_offset, y_offset]\n if (count in MaskLocations):\n stim.draw()\n count += 1\n while countDown.getTime() > 0:\n pass \n \n # Put the mask dots on the screen\n #GreenCross.setAutoDraw(False)\n # Reset the countdown clock so that the mask is on a fixed amount of time\n win.flip()\n countDown.reset()\n countDown.add(VSTM_MaskOnTime)\n while countDown.getTime() > 0:\n pass\n \n # check for quit (the Esc key)\n if event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n print(countDown.getTime())\n \n # Clear any button presses\n event.clearEvents(eventType='keyboard')\n \n # Take the dots off the screen and put the cross hair up \n win.flip() \n 
countDown.add(VSTM_RetOnTime)\n \n # Prepare the probe dot during the retention time\n ProbeLoc = -99\n # Is this a fixed location probe?\n if FixedLocations:\n # Then use the probe location from the config file\n CurrentProbeLocation = AllProbes[CurrentLoad - 1][CurrentRun][TrialCount]\n # If it is a random location probe then pick the POS or NEG probe location\n else:\n if ProbeList[TrialCount] == 0:\n CurrentProbeLocation = NegProbeLocation\n else:\n CurrentProbeLocation = PosProbeLocation\n \n # The coding of fixed versus random locations differs\n if FixedLocations:\n if (Locations == CurrentProbeLocation).any():\n # This is a positive probe\n ProbeType = 1\n corr = '1'\n else:\n ProbeType = 0\n corr = '2'\n else:\n if ProbeList[TrialCount] == 1:\n ProbeType = 1\n corr = '1'\n else:\n ProbeType = 0\n corr = '2'\n \n \n # Use a single code chunk for displaying the probe dot regardless of whether this is a random\n # location or fixed location or whether this is a POS or NEG probe\n count = 0\n for y_offset in OffSet:\n for x_offset in OffSet:\n for stim in [ProbeCircle]:\n stim.pos = [x_offset, y_offset]\n if (count in [CurrentProbeLocation]):\n stim.draw()\n count += 1\n \n while countDown.getTime() > 0:\n pass\n # Turn off the cross hair\n GreenCross.setAutoDraw(True)\n image.setAutoDraw(True)\n # Put the probe dot on the screen\n win.flip()\n thisResp.clock.reset()\n # Start the probe timer\n countDown.add(VSTM_ProbeOnTime)\n event.clearEvents(eventType='keyboard')\n print(countDown.getTime())\n thisResp.keys = -99\n thisResp.rt = -99\n # Changethis while loop to have a flag\n # The flag is set to false if a response is made or if the timer elapses\n continueRoutine = True \n while continueRoutine:\n # while countDown.getTime() > 0:\n theseKeys = event.getKeys(keyList=['escape','left', 'right','1','2'])\n if 'escape' in theseKeys:\n core.quit()\n elif len(theseKeys) > 0: # at least one key was pressed\n thisResp.keys = theseKeys[-1] # just the last key pressed\n thisResp.rt = thisResp.clock.getTime()\n continueRoutine = False\n if corr == '1':\n if ((thisResp.keys == '1') or (thisResp.keys == 'left')):\n thisResp.corr = 1\n else:\n thisResp.corr = 0\n if corr == '2':\n if ((thisResp.keys == '2') or (thisResp.keys == 'right')):\n thisResp.corr = 1\n else:\n thisResp.corr = 0\n break\n \n if countDown.getTime() > 0:\n pass \n else:\n continueRoutine = False\n \n GreenCross.setAutoDraw(False)\n image.setAutoDraw(False)\n RedCross.setAutoDraw(True)\n win.flip()\n # Check to see how much time is left after a response is made\n # Remove that amount from the clock\n countDown.add(-1*countDown.getTime())\n #while countDown.getTime() > 0: \n # pass\n # prepare the cross hair\n\n countDown.add(VSTM_ITITime)\n\n RedCross.setAutoDraw(False)\n \n print(\"Checking for responses\")\n # Change the response to zero and one for later pivot tables\n CorrectResp = 0\n RT = thisResp.rt\n # If there is no response then the RT is an empty list\n if type(RT) == list:\n RT = 0.0\n if thisResp.corr == 1:\n CorrectResp = 1\n CorrectRT = RT\n else:\n CorrectRT = 0\n \n print(RT)\n print(CorrectRT)\n dataFile.write('%i,%i,%s, %s, %i,%0.3f,%0.4f,%i, %i,' %(TrialCount,CurrentLoad, TrialStartTime, thisResp.keys, CorrectResp,RT,CorrectRT, ProbeType, CurrentProbeLocation))\n for ii in Locations:\n dataFile.write('%i,'%(ii))\n dataFile.write('\\n')\n trials.addData('CurrentTime',CurrentTime)\n trials.addData('Response.keys',thisResp.keys)\n trials.addData('Response.corr', thisResp.corr)\n if thisResp.keys != None: # 
we had a response\n trials.addData('Response.rt', thisResp.rt)\n thisExp.nextEntry()\n print(\"Finished Trial\")\n TrialCount += 1\n while countDown.getTime() > 0:\n pass\n RedCross.setAutoDraw(False) \n WhiteCross.setAutoDraw(True)\n win.flip()\n countDown.add(VSTM_InterBlockTime - 3)\n while countDown.getTime() > 0:\n pass\n WhiteCross.setAutoDraw(False) \n win.flip()\n# Turn on the countdown timer\n BlockCount += 1 \n\n\ndataFile.write(',,%s\\n'%(RunningClock.getTime()))\ntextThankyou.setAutoDraw(True)\ncountDown.add(VSTM_ThankYouOnTime)\nwin.flip()\nwhile countDown.getTime() > 0:\n pass \nwin.flip()\n\n#thisExp.saveAsWideText(filename+'.csv') \nlogging.flush()\n# make sure everything is closed down\nthisExp.abort() # or data files will save again on exit\nwin.close()\ncore.quit() " ]
[ [ "numpy.random.permutation", "numpy.arange", "numpy.array" ] ]
Yoshino-master/FreeAnchor_TensorFlow
[ "656a07c85da8b3de21416d1e5162134665abd164" ]
[ "utils/loss.py" ]
[ "import tensorflow as tf\nimport math\nfrom utils.evals import calc_iou_tf\nfrom utils.evals import decode, encode\n\nclass FreeAnchorLoss(object):\n def __init__(self, cfg):\n self.cfg = cfg\n self.xywh_weights = (10.0, 10.0, 5.0, 5.0)\n self.bbox_xform_clip = math.log(1000. / 16)\n \n def matched_box_prob(self, indices, labels, object_box_prob_select, len_anchors, nums_classes):\n labels = tf.expand_dims(labels, axis=-1)\n s = tf.shape(object_box_prob_select)\n nonzero_box_prob = tf.where(tf.equal(labels, tf.cast(tf.gather(indices, 0), tf.float32)), object_box_prob_select, tf.zeros(s))\n nonzero_box_prob = tf.reduce_max(nonzero_box_prob, axis=0)\n indices_f = tf.transpose(tf.gather(indices, [1,0]), (1,0))\n image_box_prob = tf.sparse.SparseTensor(indices_f, nonzero_box_prob, dense_shape=((len_anchors, nums_classes)))\n image_box_prob = tf.sparse.to_dense(image_box_prob, validate_indices=False)\n return image_box_prob\n \n def dismatched_box_prob(self, len_anchors, nums_classes):\n return tf.zeros((len_anchors, nums_classes))\n \n def forward(self, anchors, box_cls, box_regression, bboxs, batch_labels, batch_img_size, bboxs_num):\n box_cls_flattened, box_regression_flattened = [], []\n for box_cls_per_level, box_regression_per_level in zip(\n box_cls, box_regression\n ):\n cls_shape = tf.shape(box_cls_per_level)\n _, H, W, A = cls_shape[0], cls_shape[1], cls_shape[2], cls_shape[3]\n C = self.cfg.num_classes\n N = self.cfg.batch_size\n box_cls_per_level = tf.reshape(box_cls_per_level, shape=[N, -1, C])\n box_regression_per_level = tf.reshape(box_regression_per_level, shape=[N, -1, 4]) #$$$$$$$$$$$$$$$$$\n box_cls_flattened.append(box_cls_per_level)\n box_regression_flattened.append(box_regression_per_level)\n \n box_cls = tf.concat(box_cls_flattened, axis=1)\n box_regression_cat = tf.concat(box_regression_flattened, axis=1)\n anchors = tf.concat(anchors, axis=0)\n cls_prob = tf.nn.sigmoid(box_cls)\n anchor_shape = tf.shape(anchors)\n box_prob, positive_losses = [], []\n \n for i in range(N):\n box = tf.gather(bboxs[i], tf.range(0, bboxs_num[i], 1))\n labels = tf.gather(batch_labels[i], tf.range(0, bboxs_num[i], 1))\n cls_prob_ = cls_prob[i]\n \n box_localization = decode(box_regression_cat[i], anchors, self.xywh_weights, self.bbox_xform_clip)\n ious = calc_iou_tf(box, box_localization)\n \n t1 = self.cfg.bbox_threshold\n t2 = tf.clip_by_value(tf.expand_dims(tf.reduce_max(ious, axis=[1]), axis=-1), t1+1e-12, float('inf'))\n object_box_prob = tf.clip_by_value((ious - t1) / (t2 - t1), 0, 1)\n \n oh_labels = tf.one_hot(tf.cast(labels, tf.int64), tf.cast(tf.reduce_max(labels, 0) + 1, dtype=tf.int32))\n oh_labels = tf.transpose(oh_labels, perm=(1,0))\n object_cls_box_prob = tf.expand_dims(tf.transpose(object_box_prob, perm=(1,0)), axis=1) * oh_labels\n object_cls_box_prob = tf.transpose(object_cls_box_prob, perm=(2,1,0))\n \n indices = tf.reduce_sum(object_cls_box_prob, axis=0)\n indices = tf.transpose(tf.where(indices > 0), (1,0))\n \n object_box_prob_select = tf.gather(object_box_prob, indices[1], axis=1)\n image_box_prob = tf.cond(tf.equal(tf.size(indices), 0), \n lambda : self.dismatched_box_prob(anchor_shape[0], self.cfg.num_classes),\n lambda : self.matched_box_prob(indices, labels, object_box_prob_select,\n anchor_shape[0], self.cfg.num_classes))\n box_prob.append(image_box_prob)\n \n match_quality_matrix = calc_iou_tf(box, anchors)\n matched = tf.nn.top_k(match_quality_matrix, self.cfg.pre_anchor_topk, sorted=False).indices\n \n index_ = tf.range(0, tf.shape(labels)[0], 1)\n 
label_index = tf.transpose(tf.concat([[index_, tf.cast(labels, tf.int32)]], axis=0), (1,0))\n cls_prob_tmp = tf.gather(cls_prob_, indices=matched, axis=0)\n cls_prob_tmp = tf.transpose(cls_prob_tmp, (0,2,1))\n matched_cls_prob = tf.gather_nd(cls_prob_tmp, indices = label_index) #checked\n \n matched_object_targets = encode(tf.expand_dims(box, axis=1), tf.gather(anchors, indices=matched, axis=0), self.xywh_weights)\n retinanet_regression_loss = smooth_l1_loss(tf.gather(box_regression_cat[i], matched, axis=0),\n matched_object_targets,\n self.cfg.bbox_reg_weight, self.cfg.bbox_reg_beta)\n matched_box_prob = tf.exp(-retinanet_regression_loss)\n positive_losses.append(positive_bag_loss(matched_cls_prob * matched_box_prob, dims=1))\n \n positive_numels = tf.reduce_sum(bboxs_num)\n positive_loss = tf.reduce_sum(tf.concat(positive_losses, axis=0)) / tf.cast(tf.maximum(1, tf.cast(positive_numels, tf.int32)), tf.float32)\n box_prob = tf.stack(box_prob)\n negative_loss = focal_loss(cls_prob * (1 - box_prob), self.cfg.focal_loss_gamma) \\\n / tf.cast(tf.maximum(1, tf.cast(positive_numels * self.cfg.pre_anchor_topk, tf.int32)), tf.float32)\n \n return positive_loss * self.cfg.focal_loss_alpha + negative_loss * (1 - self.cfg.focal_loss_alpha)\n \ndef tensor2sparse(tensor):\n arr_idx = tf.where(tf.not_equal(tensor, 0))\n arr_sparse = tf.SparseTensor(arr_idx, tf.gather_nd(tensor, arr_idx), tensor.get_shape())\n return arr_sparse\n\ndef smooth_l1_loss(pred, target, weight, beta):\n val = target - pred\n abs_val = tf.abs(val)\n return weight * tf.reduce_sum(tf.where(abs_val < beta, 0.5 / beta * tf.pow(val, 2), (abs_val - 0.5 * beta)), axis=-1)\n\ndef positive_bag_loss(logits, dims):\n weight = 1.0 / tf.clip_by_value(1 - logits, 1e-12, float('inf'))\n weight_div = tf.reduce_sum(weight, axis=dims)\n weight = tf.transpose(tf.transpose(weight, (1,0)) / weight_div, (1,0))\n bag_prob = tf.reduce_sum((weight * logits), axis=dims)\n return tf.keras.backend.binary_crossentropy(tf.ones_like(bag_prob), bag_prob)\n \ndef focal_loss(logits, gamma):\n #count focal loss for negative_loss\n logits_ = tf.pow(logits, gamma)\n bce_loss = tf.keras.backend.binary_crossentropy(tf.zeros_like(logits), logits)\n return tf.reduce_sum(bce_loss * logits_)\n\n" ]
[ [ "tensorflow.concat", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.cast", "tensorflow.where", "tensorflow.gather", "tensorflow.nn.top_k", "tensorflow.nn.sigmoid", "tensorflow.gather_nd", "tensorflow.shape", "tensorflow.pow", "tensorflow.exp", "tensorflow.zeros_like", "tensorflow.sparse.SparseTensor", "tensorflow.size", "tensorflow.clip_by_value", "tensorflow.not_equal", "tensorflow.reduce_max", "tensorflow.sparse.to_dense", "tensorflow.transpose", "tensorflow.range", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.abs" ] ]
singhgargi/SEM2
[ "d0f260e10a3e34067b8c2d73abdeae36e5fa55bb" ]
[ "sem/utils.py" ]
[ "import os\nimport sys\nimport traceback\nimport numpy as np\nfrom functools import wraps\nfrom multiprocessing import Process, Queue\n\n\ndef unroll_data(x, t=1):\n \"\"\"\n This function is used by recurrent neural nets to do back-prop through time.\n\n Unrolls a data_set for with time-steps, truncated for t time-steps\n appends t-1 D-dimensional zero vectors at the beginning.\n\n Parameters:\n x: array, shape (N, D) or shape (D,)\n\n t: int\n time-steps to truncate the unroll\n\n output\n ------\n\n X_unrolled: array, shape (N-1, t, D)\n\n \"\"\"\n if np.ndim(x) == 2:\n n, d = np.shape(x)\n elif np.ndim(x):\n n, d = 1, np.shape(x)[0]\n x = np.reshape(x, (1, d))\n\n x_unrolled = np.zeros((n, t, d))\n\n # append a t-1 blank (zero) input patterns to the beginning\n data_set = np.concatenate([np.zeros((t - 1, d)), x])\n\n for ii in range(n):\n x_unrolled[ii, :, :] = data_set[ii: ii + t, :]\n\n return x_unrolled\n\n# precompute for speed (doesn't really help but whatever)\nlog_2pi = np.log(2.0 * np.pi)\n\ndef fast_mvnorm_diagonal_logprob(x, variances):\n \"\"\"\n Assumes a zero-mean mulitivariate normal with a diagonal covariance function\n\n Parameters:\n\n x: array, shape (D,)\n observations\n\n variances: array, shape (D,)\n Diagonal values of the covariance function\n\n output\n ------\n\n log-probability: float\n\n \"\"\"\n return -0.5 * (log_2pi * np.shape(x)[0] + np.sum(np.log(variances) + (x**2) / variances ))\n\n\ndef get_prior_scale(df, target_variance):\n \"\"\"\n This function solves for the scale parameter need for a scaled inverse chi-squard \n distribution, give degrees of freedom (df) and the desired variance (i.e. the \n mode of the distribution, is this function is intended to determine the prior over\n a Guassian variance).\n \n The mode of a scaled-inverse chi-squared is defined:\n (see Gelman, et al., Bayesian Data Analysis 2004)\n\n mode(theta) = df / (df + 2) * scale\n\n hense, if we set mode(theta) to our target, then the scale is\n\n scale = target_variance * (df + 2) / df\n\n \"\"\"\n return target_variance * (df + 2) / df\n\ndef delete_object_attributes(myobj):\n # take advantage of mutability here\n while myobj.__dict__.items():\n attr = [k for k in myobj.__dict__.keys()][0]\n myobj.__delattr__(attr)\n \n\ndef processify(func):\n '''Decorator to run a function as a process.\n Be sure that every argument and the return value\n is *pickable*.\n The created process is joined, so the code does not\n run in parallel.\n\n Credit: I took this function from Marc Schlaich's github:\n https://gist.github.com/schlamar/2311116\n '''\n\n def process_func(q, *args, **kwargs):\n try:\n ret = func(*args, **kwargs)\n except Exception:\n ex_type, ex_value, tb = sys.exc_info()\n error = ex_type, ex_value, ''.join(traceback.format_tb(tb))\n ret = None\n else:\n error = None\n\n q.put((ret, error))\n\n # register original function with different name\n # in sys.modules so it is pickable\n process_func.__name__ = func.__name__ + 'processify_func'\n setattr(sys.modules[__name__], process_func.__name__, process_func)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n q = Queue()\n p = Process(target=process_func, args=[q] + list(args), kwargs=kwargs)\n p.start()\n ret, error = q.get()\n p.join()\n\n if error:\n ex_type, ex_value, tb_str = error\n message = '%s (in subprocess)\\n%s' % (ex_value.message, tb_str)\n raise ex_type(message)\n\n return ret\n return wrapper" ]
[ [ "numpy.log", "numpy.reshape", "numpy.ndim", "numpy.shape", "numpy.zeros" ] ]
filipeRmlh/Trabalho_Final_AD_2020_2
[ "eed64b1d383674a8212de570aaa6e1ec46f6bd52" ]
[ "trabalho_final/events/start_request.py" ]
[ "from ..request_data import RequestData\nfrom .event import Event\nfrom numpy import random\nfrom .timeout import Timeout\nfrom .client_2_cache import Client2Cache\nfrom ..config import Config\n\n\n\nclass StartRequest(Event):\n request_id = 0\n\n\n def __init__(self, config, timestamp=0, cache_list=[], max_requests = 10000):\n request_data = RequestData(\n request_id=StartRequest.request_id,\n content=random.randint(1,config.contentSize+1)\n )\n super().__init__(request_data, timestamp, config)\n self.cache_list = cache_list\n StartRequest.request_id += 1\n self.max_requests = max_requests\n\n\n def new_timestamp(self, timestamp, config):\n new_time = timestamp + self.exp_delay(config.userRequestRate)\n self.request_data.init_timestamp = new_time\n return new_time\n\n\n def handle_event(self, timeline):\n timeout = Timeout(self.request_data, \n self.timestamp, self.config)\n timeline.insert(timeout)\n\n for cache in self.cache_list:\n cache_request = Client2Cache(\n self.request_data, self.timestamp, self.config, cache)\n timeline.insert(cache_request)\n\n if self.request_data.request_id < self.max_requests-1:\n next_request = StartRequest(\n self.config,\n self.timestamp,\n self.cache_list,\n self.max_requests\n )\n timeline.insert(next_request)\n" ]
[ [ "numpy.random.randint" ] ]
josephnoir/indexing
[ "99f6a02c22451d0db204731a6c53ed56ad751365" ]
[ "evaluation/plot01.py" ]
[ "#!/usr/local/bin/python3\n\n# import modules\n#import matplotlib.pyplot as plt\n#import numpy as np\n#from sys import argv\n#script, filename = argv\n#txt = open(filename)\n#print(txt.read())\n#plt.plot([1,2,3,4])\n#plt.ylabel('some numbers')\n#plt.show();\n\nimport csv\nfrom sys import argv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nscript, filename = argv\n\nf = open(filename, 'r')\nreader = csv.reader(f, delimiter=',')\nrows = [x for x in reader]\nalgos = [row[0] for row in rows]\nnums = [row[1:] for row in rows]\n#nums.pop() # remove total value\ndel nums[-1]\n#print(algos)\n#print(nums)\n\nN = len(nums[0])\n#print(N)\n\nind = np.arange(N)\ndata = np.array(nums, dtype='float')\n\np = plt.subplot(111)\n\nrects = []\nfor i in range(data.shape[0]):\n r = p.barh(ind, data[i], height = 0.2, left = np.sum(data[:i], axis = 0), label = algos[i])\n rects.append(r)\n\ncount = 0\nfor rs in rects:\n for r in rs:\n width = r.get_width()\n p.text(r.get_x() + width / 2, r.get_y() + 0.05, '%d' % count,\n ha='center',va='bottom')\n count += 1\n\n\np.yaxis.set_visible(False)\nticks = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x / 1000))\np.xaxis.set_major_formatter(ticks)\np.spines['right'].set_visible(False)\np.spines['top'].set_visible(False)\np.spines['left'].set_visible(False)\n\nbox = p.get_position()\np.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.1])\nhandles, labels = p.get_legend_handles_labels()\n# for h in handles:\n# print(h)\nplt.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5,-1.5), frameon=False, shadow=False, ncol=4)\n\nplSize = plt.gcf()\n#print(plSize)\n\nplt.xlabel('Time [ms]')\nplt.title(filename.replace('.txt', ''))\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.arange", "matplotlib.pyplot.gcf", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.sum", "matplotlib.pyplot.show" ] ]
SabevAtGitHub/qspreadsheet
[ "29127dd6f38573c7ede7680cf8f4852368fb2c38" ]
[ "qspreadsheet/_ndx.py" ]
[ "import logging\nimport sys\nimport os\nfrom typing import Dict\nfrom numpy.core.fromnumeric import alltrue\nimport pandas as pd\nfrom PySide2.QtCore import *\nfrom PySide2.QtGui import *\nfrom PySide2.QtWidgets import *\n\nfrom qspreadsheet.common import DF, SER, pandas_obj_insert_rows, pandas_obj_remove_rows\nfrom qspreadsheet import resources_rc\n\nlogger = logging.getLogger(__name__)\n\n\nclass _Ndx():\n VIRTUAL_COUNT = 1\n\n def __init__(self, index: pd.Index) -> None:\n i_index = range(index.size)\n self._data = self._make_index_data_for(i_index)\n self.is_mutable = False\n self.count_virtual = 0\n\n @property\n def count_committed(self) -> int:\n \"\"\"Row count of 'committed' data rows, excluding `in progress` and `virtual` rows, if any\"\"\"\n return self._data.index.size - self.count_in_progress - self.count_virtual\n\n @property\n def count(self) -> int:\n \"\"\"Row count, excluding `virtual` rows, if any\"\"\"\n return self._data.index.size + self.count_virtual\n\n @property\n def _size(self) -> int:\n \"\"\"Row count of `committed` + `in_progress` + `virtual` rows\"\"\"\n return self._data.index.size\n\n @property\n def count_in_progress(self) -> int:\n \"\"\"Row count of the `in progress` rows\"\"\"\n return self.in_progress_mask.sum()\n\n @property\n def in_progress_mask(self) -> SER:\n \"\"\"`pd.Series[bool]` with the rows/columns in progress\"\"\"\n return self._data['in_progress']\n\n @property\n def disabled_mask(self) -> SER:\n \"\"\"`pd.Series[bool]` with the disabled rows/columns\"\"\"\n return self._data['disabled']\n\n def set_disabled_mask(self, index, value: bool):\n self._data.loc[index, 'disabled'] = value\n\n @property\n def non_nullable_mask(self) -> SER:\n \"\"\"`pd.Series[bool]` with the disabled rows/columns\"\"\"\n return self._data['non_nullable']\n\n def set_non_nullable(self, index, value: bool):\n self._data.loc[index, 'non_nullable'] = value\n\n def set_disabled_in_progress(self, index, count: int):\n self._data.loc[index, 'disabled_in_progress_count'] = count\n self._update_in_progress(index)\n\n def set_non_nullable_in_progress(self, index, count: int):\n self._data.loc[index, 'non_nullable_in_progress_count'] = count\n self._update_in_progress(index)\n\n def reduce_disabled_in_progress(self, index):\n self._data.loc[index, 'disabled_in_progress_count'] -= 1\n self._update_in_progress(index)\n\n def reduce_non_nullable_in_progress(self, index):\n self._data.loc[index, 'non_nullable_in_progress_count'] -= 1\n self._update_in_progress(index)\n\n def _update_in_progress(self, index):\n self._data.loc[index, 'in_progress'] = (\n self._data.loc[index, 'disabled_in_progress_count'] +\n self._data.loc[index, 'non_nullable_in_progress_count'] > 0)\n\n def insert(self, at_index: int, count: int):\n \"\"\"Inserts rows/columns into the index data\"\"\"\n # set new index as 'not in progress' by default\n index = range(at_index, at_index + count)\n new_rows = self._make_index_data_for(index)\n self._data = pandas_obj_insert_rows(\n obj=self._data, at_index=at_index, new_rows=new_rows)\n\n def remove(self, at_index: int, count: int):\n \"\"\"Removes rows/columns into the index data\"\"\"\n self._data = pandas_obj_remove_rows(\n self._data, at_index, count)\n \n def is_virtual(self, index: int) -> bool:\n return self.count_virtual \\\n and index >= self._data.index.size\n\n @property\n def virtual_enabled(self) -> bool:\n self.count_virtual == self.VIRTUAL_COUNT\n\n @staticmethod\n def _make_index_data_for(index) -> DF:\n '''Default 'in progress' `DataFrame` to 
manage the index'''\n return pd.DataFrame(\n data={'in_progress': False, 'disabled': False, 'non_nullable': False,\n 'non_nullable_in_progress_count': 0, 'disabled_in_progress_count': 0},\n index=index)" ]
[ [ "pandas.DataFrame" ] ]
YuLin-code/MP-MIM
[ "0871a27d1717dc6f5dad623c6721824b104c3bb8" ]
[ "RESEPT/Embedding_Ground_Truth_Quality_Rank.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nimport os\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics.cluster import adjusted_rand_score\r\nimport argparse\r\n\r\n\r\nparser = argparse.ArgumentParser(description='Embedding_Ground_Truth_Quality_Rank_')\r\nparser.add_argument('--sampleName', type=str, default='151507')\r\nargs = parser.parse_args()\r\n\r\n\r\nif __name__ == '__main__':\r\n sample = args.sampleName\r\n meta_folder_path = os.path.abspath('./meta_data_folder/metaData_brain_16_coords') \r\n embedding_folder_path = os.path.abspath('./RESEPT_embedding_folder')\r\n output_folder_path = os.path.abspath('./Embedding_Ground_Truth_Quality_Rank_'+sample+'/')\r\n if not os.path.exists(output_folder_path):\r\n os.makedirs(output_folder_path)\r\n \r\n knn_distanceList=['euclidean']\r\n PEalphaList = ['0.1','0.2','0.3', '0.5', '1.0', '1.2', '1.5','2.0']\r\n zdimList = ['3','10', '16','32', '64', '128', '256']\r\n ####sample_list\r\n sample_list = [ '151507','151508', '151509', '151510', '151669', '151670', '151671', '151672', '151673', '151674', '151675', '151676','18-64','2-5', '2-8', 'T4857']\r\n letter_list = [ 'a','b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l','m', 'n', 'o', 'p']\r\n count_init = sample_list.index(sample)\r\n count = 56*count_init\r\n letter = letter_list[count_init]\r\n \r\n embedding_name_list = []\r\n ari_result_list = []\r\n n_clusters_num = 7\r\n \r\n if sample=='151669' or sample=='151670' or sample=='151671' or sample=='151672':\r\n n_clusters_num = 5\r\n if sample=='2-8':\r\n n_clusters_num = 6\r\n \r\n for i in range(len(PEalphaList)):\r\n for j in range((len(zdimList))):\r\n count = count + 1\r\n embedding_root_path = '/'+sample+'_embedding_raw/'+letter+'_'+str(count)+'_outputdir-3S-'+sample+'_raw_EM1_resolution0.3_euclidean_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])+'/'+sample+'_raw_6_euclidean_NA_dummy_add_'+str(PEalphaList[i])+'_intersect_160_GridEx19_embedding.csv'\r\n meta_df = pd.read_csv(meta_folder_path+'/'+sample+'_humanBrain_metaData.csv')\r\n embedding_df = pd.read_csv(embedding_folder_path+embedding_root_path)\r\n embedding_name = sample+'_'+letter+'_'+str(count)+'_raw_res0.3_euclidean_NA_dummy_add_PEalpha'+str(PEalphaList[i])+'_k6_NA_zdim'+str(zdimList[j])\r\n embedding_name_list.append(embedding_name)\r\n \r\n X = embedding_df[['embedding0','embedding1','embedding2']].values\r\n #X = embedding_df.iloc[:,1:4].values\r\n print(X.shape)\r\n kmeans = KMeans(n_clusters=n_clusters_num, random_state=0).fit(X)\r\n kmeans_label = kmeans.labels_\r\n print(kmeans_label)\r\n \r\n ground_truth_init_np = np.array(meta_df['benmarklabel'])\r\n ground_truth_label_np = np.zeros((len(ground_truth_init_np),))\r\n \r\n if sample == '2-5' or sample == '2-8' or sample == '18-64' or sample == 'T4857':\r\n for k in range(len(ground_truth_init_np)):\r\n if ground_truth_init_np[k] == 'Layer 1':\r\n ground_truth_label_np[k] = 1\r\n if ground_truth_init_np[k] == 'Layer 2':\r\n ground_truth_label_np[k] = 2\r\n if ground_truth_init_np[k] == 'Layer 3':\r\n ground_truth_label_np[k] = 3\r\n if ground_truth_init_np[k] == 'Layer 4':\r\n ground_truth_label_np[k] = 4\r\n if ground_truth_init_np[k] == 'Layer 5':\r\n ground_truth_label_np[k] = 5\r\n if ground_truth_init_np[k] == 'Layer 6':\r\n ground_truth_label_np[k] = 6\r\n if ground_truth_init_np[k] == 'White matter' or ground_truth_init_np[k] == 'Noise' or ground_truth_init_np[k] is np.NAN:\r\n ground_truth_label_np[k] = 0\r\n if sample == '151507' or sample == '151508' or sample 
== '151509' or sample == '151510' or sample == '151669' or sample == '151670' or sample == '151671' or sample == '151672' or sample == '151673' or sample == '151674' or sample == '151675' or sample == '151676':\r\n for k in range(len(ground_truth_init_np)):\r\n if ground_truth_init_np[k] == 'Layer1':\r\n ground_truth_label_np[k] = 1\r\n if ground_truth_init_np[k] == 'Layer2':\r\n ground_truth_label_np[k] = 2\r\n if ground_truth_init_np[k] == 'Layer3':\r\n ground_truth_label_np[k] = 3\r\n if ground_truth_init_np[k] == 'Layer4':\r\n ground_truth_label_np[k] = 4\r\n if ground_truth_init_np[k] == 'Layer5':\r\n ground_truth_label_np[k] = 5\r\n if ground_truth_init_np[k] == 'Layer6':\r\n ground_truth_label_np[k] = 6\r\n if ground_truth_init_np[k] == 'WM' or ground_truth_init_np[k] is np.NAN:\r\n ground_truth_label_np[k] = 0\r\n print(ground_truth_label_np)\r\n ari = adjusted_rand_score(kmeans_label , ground_truth_label_np)\r\n ari_result_list.append(ari)\r\n order_num_list = []\r\n for l in range(len(ari_result_list)):\r\n order_num_list.append(l+1)\r\n order_num_pd = pd.DataFrame({'Order_num':order_num_list})\r\n ARI_k_means_result = pd.DataFrame({'Name':embedding_name_list,'ARI_k_means':ari_result_list})\r\n ARI_k_means_result_sort = ARI_k_means_result.sort_values(by=['ARI_k_means'], ascending=False)\r\n ARI_k_means_result_sort.index = order_num_list\r\n ARI_k_means_result_sort.to_csv(output_folder_path+'/'+sample+'_raw_embedding_ground_truth_rank.csv')\r\n\r\n" ]
[ [ "pandas.read_csv", "sklearn.cluster.KMeans", "pandas.DataFrame", "numpy.array", "sklearn.metrics.cluster.adjusted_rand_score" ] ]
Shadoward/splsensors
[ "308875e807df0924de81f2ef9ec2b8fc1cd04a89" ]
[ "src/splsensors/geodetic.py" ]
[ "#!/usr/bin/python\n#\n# --------------------------------------------------------------------- \n# | |\n# |\tgeodetic.cc - a collection of geodetic functions |\n# |\tPaul Kennedy May 2016 |\n# |\tJim Leven - Dec 99 |\n# | |\n# | originally from: |\n# | http://wegener.mechanik.tu-darmstadt.de/GMT-Help/Archiv/att-8710/Geodetic_py |\n# |ftp://pdsimage2.wr.usgs.gov/pub/pigpen/Python/Geodetic_py.py\t\t|\n# | |\n# --------------------------------------------------------------------- \n# \n# \n# ------------------------------------------------------------------------------\n# | Algrothims from Geocentric Datum of Australia Technical Manual\t |\n# | \t\t\t\t\t\t\t\t |\n# | http://www.anzlic.org.au/icsm/gdatum/chapter4.html\t \t\t|\n# | \t\t\t\t\t\t\t\t |\n# | This page last updated 11 May 1999 \t \t\t\t|\n# | \t\t\t\t\t\t\t\t |\n# | Computations on the Ellipsoid\t \t\t\t|\n# | \t\t\t\t\t\t\t\t |\n# | There are a number of formulae that are available \t\t|\n# | to calculate accurate geodetic positions, \t\t \t\t|\n# | azimuths and distances on the ellipsoid.\t\t\t |\n# | \t\t\t\t\t\t\t\t |\n# | Vincenty's formulae (Vincenty, 1975) may be used \t\t |\n# | for lines ranging from a few cm to nearly 20,000 km, \t |\n# | with millimetre accuracy. \t\t\t\t\t |\n# | The formulae have been extensively tested \t\t |\n# | for the Australian region, by comparison with results \t\t|\n# | from other formulae (Rainsford, 1955 & Sodano, 1965). \t |\n# |\t\t\t\t\t\t\t\t |\n# | * Inverse problem: azimuth and distance from known \t \t\t|\n# |\t\t\tlatitudes and longitudes \t\t\t |\n# | * Direct problem: Latitude and longitude from known \t |\n# |\t\t\tposition, azimuth and distance. \t\t |\n# | * Sample data \t\t\t\t\t\t |\n# | * Excel spreadsheet \t\t\t \t|\n# | \t\t\t\t\t\t\t\t |\n# | Vincenty's Inverse formulae\t\t\t\t \t|\n# | Given: latitude and longitude of two points \t\t|\n# |\t\t\t(latitude1, longitude1 and latitude2, longitude2), \t|\n# | Calculate: the ellipsoidal distance (s) and \t \t\t|\n# | forward and reverse azimuths between the points (alpha1Tp2, alpha21).\t|\n# |\t\t\t\t\t\t\t\t\t |\n# ------------------------------------------------------------------------------\n\nimport math\nimport numpy as np\n\ndef medfilt (x, k):\n \"\"\"Apply a length-k median filter to a 1D array x.\n Boundaries are extended by repeating endpoints.\n \"\"\"\n assert k % 2 == 1, \"Median filter length must be odd.\"\n assert x.ndim == 1, \"Input must be one-dimensional.\"\n k2 = (k - 1) // 2\n y = np.zeros ((len (x), k), dtype=x.dtype)\n y[:,k2] = x\n for i in range (k2):\n j = k2 - i\n y[j:,i] = x[:-j]\n y[:j,i] = x[0]\n y[:-j,-(i+1)] = x[j:]\n y[-j:,-(i+1)] = x[-1]\n return np.median (y, axis=1)\n \n# from: http://mathforum.org/library/drmath/view/62034.html\ndef calculateRangeBearingFromGridPosition(easting1, northing1, easting2, northing2):\n \"\"\"given 2 east, north, pairs, compute the range and bearing\"\"\"\n\n dx = easting2-easting1\n dy = northing2-northing1\n\n bearing = 90 - (180/math.pi)*math.atan2(northing2-northing1, easting2-easting1)\n return (math.sqrt((dx*dx)+(dy*dy)), bearing)\n\n\n# taken frm http://gis.stackexchange.com/questions/76077/how-to-create-points-based-on-the-distance-and-bearing-from-a-survey-point\ndef calculateGridPositionFromRangeBearing(easting, northing, distance, bearing):\n \"\"\"given an east, north, range and bearing, compute a new coordinate on the grid\"\"\"\n point = (easting, northing)\n angle = 90 - bearing\n bearing = math.radians(bearing)\n angle = math.radians(angle)\n\n 
# polar coordinates\n dist_x = distance * math.cos(angle)\n dist_y = distance * math.sin(angle)\n\n xfinal = point[0] + dist_x\n yfinal = point[1] + dist_y\n\n # direction cosines\n cosa = math.cos(angle)\n cosb = math.cos(bearing)\n xfinal = point[0] + (distance * cosa)\n yfinal = point[1] + (distance * cosb)\n \n return [xfinal, yfinal]\n\ndef calculateRangeBearingFromGeographicals(longitude1, latitude1, longitude2, latitude2 ) :\n \"\"\" \n Returns s, the distance between two geographic points on the ellipsoid\n and alpha1, alpha2, the forward and reverse azimuths between these points.\n lats, longs and azimuths are in decimal degrees, distance in metres \n\n Returns ( s, alpha1Tp2, alpha21 ) as a tuple\n \"\"\"\n f = 1.0 / 298.257223563\t\t# WGS84\n a = 6378137.0 \t\t\t# metres\n\n if (abs( latitude2 - latitude1 ) < 1e-8) and ( abs( longitude2 - longitude1) < 1e-8 ) :\n return 0.0, 0.0, 0.0\n\n piD4 = math.atan( 1.0 )\n two_pi = piD4 * 8.0\n\n latitude1 = latitude1 * piD4 / 45.0\n longitude1 = longitude1 * piD4 / 45.0\t\t# unfortunately lambda is a key word!\n latitude2 = latitude2 * piD4 / 45.0\n longitude2 = longitude2 * piD4 / 45.0\n\n b = a * (1.0 - f)\n\n TanU1 = (1-f) * math.tan( latitude1 )\n TanU2 = (1-f) * math.tan( latitude2 )\n\n U1 = math.atan(TanU1)\n U2 = math.atan(TanU2)\n\n lembda = longitude2 - longitude1\n last_lembda = -4000000.0\t\t# an impossibe value\n omega = lembda\n\n # Iterate the following equations, \n # until there is no significant change in lembda \n\n while ( last_lembda < -3000000.0 or lembda != 0 and abs( (last_lembda - lembda)/lembda) > 1.0e-9 ) :\n\n sqr_sin_sigma = pow( math.cos(U2) * math.sin(lembda), 2) + \\\n pow( (math.cos(U1) * math.sin(U2) - \\\n math.sin(U1) * math.cos(U2) * math.cos(lembda) ), 2 )\n\n Sin_sigma = math.sqrt( sqr_sin_sigma )\n\n Cos_sigma = math.sin(U1) * math.sin(U2) + math.cos(U1) * math.cos(U2) * math.cos(lembda)\n \n sigma = math.atan2( Sin_sigma, Cos_sigma )\n\n Sin_alpha = math.cos(U1) * math.cos(U2) * math.sin(lembda) / math.sin(sigma)\n alpha = math.asin( Sin_alpha )\n\n Cos2sigma_m = math.cos(sigma) - (2 * math.sin(U1) * math.sin(U2) / pow(math.cos(alpha), 2) )\n\n C = (f/16) * pow(math.cos(alpha), 2) * (4 + f * (4 - 3 * pow(math.cos(alpha), 2)))\n\n last_lembda = lembda\n\n lembda = omega + (1-C) * f * math.sin(alpha) * (sigma + C * math.sin(sigma) * \\\n (Cos2sigma_m + C * math.cos(sigma) * (-1 + 2 * pow(Cos2sigma_m, 2) )))\n\n u2 = pow(math.cos(alpha),2) * (a*a-b*b) / (b*b)\n\n A = 1 + (u2/16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))\n\n B = (u2/1024) * (256 + u2 * (-128+ u2 * (74 - 47 * u2)))\n\n delta_sigma = B * Sin_sigma * (Cos2sigma_m + (B/4) * \\\n (Cos_sigma * (-1 + 2 * pow(Cos2sigma_m, 2) ) - \\\n (B/6) * Cos2sigma_m * (-3 + 4 * sqr_sin_sigma) * \\\n (-3 + 4 * pow(Cos2sigma_m,2 ) )))\n\n s = b * A * (sigma - delta_sigma)\n\n alpha1Tp2 = math.atan2( (math.cos(U2) * math.sin(lembda)), \\\n (math.cos(U1) * math.sin(U2) - math.sin(U1) * math.cos(U2) * math.cos(lembda)))\n\n alpha21 = math.atan2( (math.cos(U1) * math.sin(lembda)), \\\n (-math.sin(U1) * math.cos(U2) + math.cos(U1) * math.sin(U2) * math.cos(lembda)))\n\n if ( alpha1Tp2 < 0.0 ) : \n alpha1Tp2 = alpha1Tp2 + two_pi\n if ( alpha1Tp2 > two_pi ) : \n alpha1Tp2 = alpha1Tp2 - two_pi\n\n alpha21 = alpha21 + two_pi / 2.0\n if ( alpha21 < 0.0 ) : \n alpha21 = alpha21 + two_pi\n if ( alpha21 > two_pi ) : \n alpha21 = alpha21 - two_pi\n\n alpha1Tp2 = alpha1Tp2 * 45.0 / piD4\n alpha21 = alpha21 * 45.0 / piD4\n return s, alpha1Tp2, alpha21 \n\n # END of 
Vincenty's Inverse formulae \n\n\n#-------------------------------------------------------------------------------\n# Vincenty's Direct formulae\t\t\t\t\t\t\t|\n# Given: latitude and longitude of a point (latitude1, longitude1) and \t\t\t|\n# the geodetic azimuth (alpha1Tp2) \t\t\t\t\t\t|\n# and ellipsoidal distance in metres (s) to a second point,\t\t\t|\n# \t\t\t\t\t\t\t\t\t\t|\n# Calculate: the latitude and longitude of the second point (latitude2, longitude2) \t|\n# and the reverse azimuth (alpha21).\t\t\t\t\t\t|\n# \t\t\t\t\t\t\t\t\t\t|\n#-------------------------------------------------------------------------------\n\ndef calculateGeographicalPositionFromRangeBearing(latitude1, longitude1, alpha1To2, s ) :\n \"\"\"\n Returns the lat and long of projected point and reverse azimuth\n given a reference point and a distance and azimuth to project.\n lats, longs and azimuths are passed in decimal degrees\n\n Returns ( latitude2, longitude2, alpha2To1 ) as a tuple \n\n \"\"\"\n f = 1.0 / 298.257223563\t\t# WGS84\n a = 6378137.0 \t\t\t# metres\n\n piD4 = math.atan( 1.0 )\n two_pi = piD4 * 8.0\n\n latitude1 = latitude1 * piD4 / 45.0\n longitude1 = longitude1 * piD4 / 45.0\n alpha1To2 = alpha1To2 * piD4 / 45.0\n if ( alpha1To2 < 0.0 ) : \n alpha1To2 = alpha1To2 + two_pi\n if ( alpha1To2 > two_pi ) : \n alpha1To2 = alpha1To2 - two_pi\n\n b = a * (1.0 - f)\n\n TanU1 = (1-f) * math.tan(latitude1)\n U1 = math.atan( TanU1 )\n sigma1 = math.atan2( TanU1, math.cos(alpha1To2) )\n Sinalpha = math.cos(U1) * math.sin(alpha1To2)\n cosalpha_sq = 1.0 - Sinalpha * Sinalpha\n\n u2 = cosalpha_sq * (a * a - b * b ) / (b * b)\n A = 1.0 + (u2 / 16384) * (4096 + u2 * (-768 + u2 * \\\n (320 - 175 * u2) ) )\n B = (u2 / 1024) * (256 + u2 * (-128 + u2 * (74 - 47 * u2) ) )\n\n # Starting with the approximation\n sigma = (s / (b * A))\n\n last_sigma = 2.0 * sigma + 2.0\t# something impossible\n\n # Iterate the following three equations \n # until there is no significant change in sigma \n\n # two_sigma_m , delta_sigma\n while ( abs( (last_sigma - sigma) / sigma) > 1.0e-9 ) :\n two_sigma_m = 2 * sigma1 + sigma\n\n delta_sigma = B * math.sin(sigma) * ( math.cos(two_sigma_m) \\\n + (B/4) * (math.cos(sigma) * \\\n (-1 + 2 * math.pow( math.cos(two_sigma_m), 2 ) - \\\n (B/6) * math.cos(two_sigma_m) * \\\n (-3 + 4 * math.pow(math.sin(sigma), 2 )) * \\\n (-3 + 4 * math.pow( math.cos (two_sigma_m), 2 ))))) \\\n\n last_sigma = sigma\n sigma = (s / (b * A)) + delta_sigma\n\n latitude2 = math.atan2 ( (math.sin(U1) * math.cos(sigma) + math.cos(U1) * math.sin(sigma) * math.cos(alpha1To2) ), \\\n ((1-f) * math.sqrt( math.pow(Sinalpha, 2) + \\\n pow(math.sin(U1) * math.sin(sigma) - math.cos(U1) * math.cos(sigma) * math.cos(alpha1To2), 2))))\n\n lembda = math.atan2( (math.sin(sigma) * math.sin(alpha1To2 )), (math.cos(U1) * math.cos(sigma) - \\\n math.sin(U1) * math.sin(sigma) * math.cos(alpha1To2)))\n\n C = (f/16) * cosalpha_sq * (4 + f * (4 - 3 * cosalpha_sq ))\n\n omega = lembda - (1-C) * f * Sinalpha * \\\n (sigma + C * math.sin(sigma) * (math.cos(two_sigma_m) + \\\n C * math.cos(sigma) * (-1 + 2 * math.pow(math.cos(two_sigma_m),2) )))\n\n longitude2 = longitude1 + omega\n\n alpha21 = math.atan2 ( Sinalpha, (-math.sin(U1) * math.sin(sigma) + \\\n math.cos(U1) * math.cos(sigma) * math.cos(alpha1To2)))\n\n alpha21 = alpha21 + two_pi / 2.0\n if ( alpha21 < 0.0 ) :\n alpha21 = alpha21 + two_pi\n if ( alpha21 > two_pi ) :\n alpha21 = alpha21 - two_pi\n\n latitude2 = latitude2 * 45.0 / piD4\n longitude2 = longitude2 * 45.0 / piD4\n 
alpha21 = alpha21 * 45.0 / piD4\n\n return latitude2, longitude2, alpha21 \n\n # END of Vincenty's Direct formulae\n\n#--------------------------------------------------------------------------\n# Notes: \n# \n# * \"The inverse formulae may give no solution over a line \n# \tbetween two nearly antipodal points. This will occur when \n# \tlembda ... is greater than pi in absolute value\". (Vincenty, 1975)\n# \n# * In Vincenty (1975) L is used for the difference in longitude, \n# \thowever for consistency with other formulae in this Manual, \n# \tomega is used here. \n# \n# * Variables specific to Vincenty's formulae are shown below, \n# \tothers common throughout the manual are shown in the Glossary. \n# \n# \n# alpha = Azimuth of the geodesic at the equator\n# U = Reduced latitude\n# lembda = Difference in longitude on an auxiliary sphere (longitude1 & longitude2 \n# \t\tare the geodetic longitudes of points 1 & 2)\n# sigma = Angular distance on a sphere, from point 1 to point 2\n# sigma1 = Angular distance on a sphere, from the equator to point 1\n# sigma2 = Angular distance on a sphere, from the equator to point 2\n# sigma_m = Angular distance on a sphere, from the equator to the \n# \t\tmidpoint of the line from point 1 to point 2\n# u, A, B, C = Internal variables\n# \n# \n# Sample Data\n# \n# Flinders Peak\n# -37 57'03.72030\"\n# 144 25'29.52440\"\n# Buninyong\n# -37 39'10.15610\"\n# 143 55'35.38390\"\n# Ellipsoidal Distance\n# 54,972.271 m\n# \n# Forward Azimuth\n# 306 52'05.37\"\n# \n# Reverse Azimuth\n# 127 10'25.07\"\n# \n# \n#*******************************************************************\n\n# Test driver\n\nif __name__ == \"__main__\" :\n\n f = 1.0 / 298.257223563\t\t# WGS84\n a = 6378137.0 \t\t\t# metres\n\n print (\"\\n Ellipsoidal major axis = %12.3f metres\\n\" % ( a ))\n print (\"\\n Inverse flattening = %15.9f\\n\" % ( 1.0/f ))\n\n print (\"\\n Test Flinders Peak to Buninyon\")\n print (\"\\n ****************************** \\n\")\n latitude1 = -(( 3.7203 / 60. + 57) / 60. + 37 )\n longitude1 = ( 29.5244 / 60. + 25) / 60. + 144\n print (\"Flinders Peak = %12.6f, %13.6f \\n\" % ( latitude1, longitude1 ))\n deg = int(latitude1)\n min = int(abs( ( latitude1 - deg) * 60.0 ))\n sec = abs(latitude1 * 3600 - deg * 3600) - min * 60\n print (\" Flinders Peak = %3i\\xF8%3i\\' %6.3f\\\", \" % ( deg, min, sec ),)\n deg = int(longitude1)\n min = int(abs( ( longitude1 - deg) * 60.0 ))\n sec = abs(longitude1 * 3600 - deg * 3600) - min * 60\n print (\" %3i\\xF8%3i\\' %6.3f\\\" \\n\" % ( deg, min, sec ))\n\n latitude2 = -(( 10.1561 / 60. + 39) / 60. + 37 )\n longitude2 = ( 35.3839 / 60. + 55) / 60. 
+ 143\n print (\"\\n Buninyon = %12.6f, %13.6f \\n\" % ( latitude2, longitude2 ))\n\n deg = int(latitude2)\n min = int(abs( ( latitude2 - deg) * 60.0 ))\n sec = abs(latitude2 * 3600 - deg * 3600) - min * 60\n print (\" Buninyon = %3i\\xF8%3i\\' %6.3f\\\", \" % ( deg, min, sec ),)\n deg = int(longitude2)\n min = int(abs( ( longitude2 - deg) * 60.0 ))\n sec = abs(longitude2 * 3600 - deg * 3600) - min * 60\n print (\" %3i\\xF8%3i\\' %6.3f\\\" \\n\" % ( deg, min, sec ))\n\n dist, alpha1Tp2, alpha21 = vinc_dist ( f, a, latitude1, longitude1, latitude2, longitude2 )\n\n print (\"\\n Ellipsoidal Distance = %15.3f metres\\n should be 54972.271 m\\n\" % ( dist ))\n print (\"\\n Forward and back azimuths = %15.6f, %15.6f \\n\" % ( alpha1Tp2, alpha21 ))\n deg = int(alpha1Tp2)\n min = int( abs(( alpha1Tp2 - deg) * 60.0 ) )\n sec = abs(alpha1Tp2 * 3600 - deg * 3600) - min * 60\n print (\" Forward azimuth = %3i\\xF8%3i\\' %6.3f\\\"\\n\" % ( deg, min, sec ))\n deg = int(alpha21)\n min = int(abs( ( alpha21 - deg) * 60.0 ))\n sec = abs(alpha21 * 3600 - deg * 3600) - min * 60\n print (\" Reverse azimuth = %3i\\xF8%3i\\' %6.3f\\\"\\n\" % ( deg, min, sec ))\n\n\n # Test the direct function */\n latitude1 = -(( 3.7203 / 60. + 57) / 60. + 37 )\n longitude1 = ( 29.5244 / 60. + 25) / 60. + 144\n dist = 54972.271\n alpha1Tp2 = ( 5.37 / 60. + 52) / 60. + 306\n latitude2 = longitude2 = 0.0\n alpha21 = 0.0\n\n latitude2, longitude2, alpha21 = vincentyDirect (latitude1, longitude1, alpha1Tp2, dist )\n\n print (\"\\n Projected point =%11.6f, %13.6f \\n\" % ( latitude2, longitude2 ))\n deg = int(latitude2)\n min = int(abs( ( latitude2 - deg) * 60.0 ))\n sec = abs( latitude2 * 3600 - deg * 3600) - min * 60\n print (\" Projected Point = %3i\\xF8%3i\\' %6.3f\\\", \" % ( deg, min, sec ),)\n deg = int(longitude2)\n min = int(abs( ( longitude2 - deg) * 60.0 ))\n sec = abs(longitude2 * 3600 - deg * 3600) - min * 60\n print (\" %3i\\xF8%3i\\' %6.3f\\\"\\n\" % ( deg, min, sec ))\n print (\" Should be Buninyon \\n\" )\n print (\"\\n Reverse azimuth = %10.6f \\n\" % ( alpha21 ))\n deg = int(alpha21)\n min = int(abs( ( alpha21 - deg) * 60.0 ))\n sec = abs(alpha21 * 3600 - deg * 3600) - min * 60\n print (\" Reverse azimuth = %3i\\xF8%3i\\' %6.3f\\\"\\n\\n\" % ( deg, min, sec ))\n\n#*******************************************************************\n\ndef est_dist( latitude1, longitude1, latitude2, longitude2 ) :\n \"\"\" \n\n Returns an estimate of the distance between two geographic points\n This is a quick and dirty vinc_dist \n which will generally estimate the distance to within 1%\n Returns distance in metres\n\n \"\"\"\n f = 1.0 / 298.257223563\t\t# WGS84\n a = 6378137.0 \t\t\t# metres\n\n piD4 = 0.785398163397 \n\n latitude1 = latitude1 * piD4 / 45.0\n longitude1 = longitude1 * piD4 / 45.0\n latitude2 = latitude2 * piD4 / 45.0\n longitude2 = longitude2 * piD4 / 45.0\n\n c = math.cos((latitude2+latitude1)/2.0)\n\n return math.sqrt( pow(math.fabs(latitude2-latitude1), 2) + \\\n pow(math.fabs(longitude2-longitude1)*c, 2) ) * a * ( 1.0 - f + f * c )\n # END of rough estimate of the distance." ]
[ [ "numpy.median" ] ]
jackyoung96/gym-pybullet-drones
[ "770a22a4f1102f08ce4617560fab06ba405715bd" ]
[ "custom/learn.py" ]
[ "\"\"\"Script demonstrating the use of `gym_pybullet_drones`' Gym interface.\n\nClass TakeoffAviary is used as a learning env for the A2C and PPO algorithms.\n\nExample\n-------\nIn a terminal, run as:\n\n $ python learn.py\n\nNotes\n-----\nThe boolean argument --rllib switches between `stable-baselines3` and `ray[rllib]`.\nThis is a minimal working example integrating `gym-pybullet-drones` with \nreinforcement learning libraries `stable-baselines3` and `ray[rllib]`.\nIt is not meant as a good/effective learning example.\n\n\"\"\"\nimport time\nimport argparse\nimport gym\nimport numpy as np\nimport yaml\nimport os\nimport shutil\n\nfrom torchsummary import summary\nimport torch\n\nimport stable_baselines3\nfrom stable_baselines3 import A2C, SAC\nfrom stable_baselines3.a2c import MlpPolicy\nfrom stable_baselines3.common.env_checker import check_env\nfrom stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback\n\nfrom gym_pybullet_drones.utils.Logger import Logger\nfrom gym_pybullet_drones.envs.single_agent_rl.TakeoffAviary import TakeoffAviary\nfrom gym_pybullet_drones.envs.BaseAviary import DroneModel, Physics, BaseAviary\nfrom gym_pybullet_drones.envs.single_agent_rl.BaseSingleAgentAviary import ActionType, ObservationType, BaseSingleAgentAviary\nfrom gym_pybullet_drones.utils.utils import sync, str2bool\nfrom envs.singleEnv.customEnv import customAviary\n\nfrom utils import configCallback, saveCallback\n\n\ndef make_env(gui=False,record=False, **kwargs):\n env = gym.make(id=\"takeoff-aviary-v0\", # arbitrary environment that has state normalization and clipping\n drone_model=DroneModel.CF2X,\n initial_xyzs=np.array([[0.0,0.0,2.0]]),\n initial_rpys=np.array([[0.0,0.0,0.0]]),\n physics=Physics.PYB_GND_DRAG_DW,\n freq=240,\n aggregate_phy_steps=1,\n gui=gui,\n record=record, \n obs=ObservationType.KIN,\n act=ActionType.RPM)\n env = customAviary(env, **kwargs)\n\n return env\n\ndef net_arch(cfg):\n # network architecture\n net_dict = cfg['model']['policy_kwargs']['net_arch']\n if 'share' in net_dict:\n share = net_dict.pop('share')\n cfg['model']['policy_kwargs']['net_arch'] = [*share, net_dict]\n\n # Activation function\n actv_ftn = cfg['model']['policy_kwargs']['activation_fn']\n cfg['model']['policy_kwargs']['activation_fn'] = getattr(torch.nn, actv_ftn)\n\n return cfg\n\nif __name__ == \"__main__\":\n\n #### Define and parse (optional) arguments for the script ##\n with open('config/train.yaml','r') as f:\n cfg = yaml.safe_load(f)\n\n #### Check the environment's spaces ########################\n env = make_env(gui=False,record=False,**cfg['env_kwargs'])\n check_env(env,\n warn=True,\n skip_render_check=True\n )\n\n #### Train the model #######################################\n cfg = net_arch(cfg)\n RL_algo = getattr(stable_baselines3, cfg['RL_algo'])\n model = RL_algo(\"MlpPolicy\",\n env,\n verbose=1,\n **cfg['model']\n )\n try:\n summary(model.actor, env.observation_space.shape)\n summary(model.critic, [env.observation_space.shape, env.action_space.shape])\n except:\n pass\n\n if cfg['train']['pretrained'] is not None:\n if os.path.exists(os.path.join(cfg['train']['pretrained'], \"final_model.zip\")):\n model.set_parameters(os.path.join(cfg['train']['pretrained'], \"final_model\"))\n print(\"final model loaded\")\n else:\n flist = os.listdir(os.path.join(cfg['train']['pretrained'], \"ckpt\"))\n n = np.sort([int(f.split('_')[1]) for f in flist])[-1]\n model.set_parameters(os.path.join(cfg['train']['pretrained'], \"ckpt\",\"ckpt_%d_steps\"%n))\n 
print(\"ckpt %d model loaded\"%n)\n\n # save callbacks\n savecallback = saveCallback(save_freq=cfg['train']['save_freq'],\n name_prefix='ckpt')\n configcallback = configCallback() # configuration save\n callback = CallbackList([savecallback, configcallback])\n\n # model learning\n try: \n model.learn(cfg['train']['total_timesteps'], callback=callback) # Typically not enough\n model.save(os.path.join(model._logger.dir, \"final_model\"))\n except KeyboardInterrupt:\n model.save(os.path.join(model._logger.dir, \"final_model\"))\n" ]
[ [ "numpy.array" ] ]
kozzion/tensealstat
[ "93314a8721ec6b1a2bea466aaeadb06cbf5a4f63" ]
[ "test/student_t_repeated_measures.py" ]
[ "import sys\nimport os\nfrom scipy import stats\nimport tenseal as ts\nimport numpy as np\nfrom scipy.stats import t\n\nsys.path.append(os.path.abspath('../../tensealstat'))\n\nfrom tensealstat.tools_context import ToolsContext as tc\nfrom tensealstat.algebra.algebra_numpy import AlgebraNumpy\nfrom tensealstat.algebra.algebra_tenseal import AlgebraTenseal\nfrom tensealstat.statistic.student_t_repeated_measures import StudentTRepeatedMeasures\n\n\nstatistic_generator = StudentTRepeatedMeasures()\n#\n# This test follows Larsen Marc 4Th edition P790\n#\n\n# 1 done by the key holder\ncontext = tc.get_context_default()\nalgebra_tenseal = AlgebraTenseal(context)\nalgebra_numpy = AlgebraNumpy()\n\n# 2 done by the data holders\nsample_0 = np.array([14.6, 17.3, 10.9, 12.8, 16.6, 12.2, 11.2, 15.4, 14.8, 16.2])\nsample_1 = np.array([13.8, 15.4, 11.3, 11.6, 16.4, 12.6, 11.8, 15.0, 14.4, 15.0])\nlist_sample = [sample_0, sample_1]\nlist_sample_encrypted = [algebra_tenseal.encrypt_vector(sample) for sample in list_sample] \n\n# 3 done by the agregator\nstatistic_encrypted = statistic_generator.encrypt_statistic(algebra_tenseal, list_sample_encrypted)\n\n# 4 done by the key holder\nt_statistic, degrees_of_freedom = statistic_generator.decrypt_statistic(algebra_tenseal, statistic_encrypted)\np_value = t.cdf(t_statistic, degrees_of_freedom)\n# p value should be between about 0.94 and 0.95\nprint('via tensealstat')\nprint('t_statistic: ' + str(t_statistic))\nprint('p_value: ' + str(p_value))\n\n# Test version\nstatistic_encrypted = statistic_generator.encrypt_statistic(algebra_numpy, list_sample)\nt_statistic, degrees_of_freedom = statistic_generator.decrypt_statistic(algebra_numpy, statistic_encrypted)\np_value = t.cdf(t_statistic, degrees_of_freedom)\nprint('')\nprint('via tensealstattest')\nprint('t_statistic: ' + str(t_statistic))\nprint('p_value: ' + str(p_value))\n\n# Scipy version\nt_statistic, p_value = stats.ttest_rel(sample_0 ,sample_1)\nprint('')\nprint('via scipy')\nprint('t_statistic: ' + str(t_statistic))\nprint('p_value: ' + str(1 - (p_value / 2)))\n" ]
[ [ "scipy.stats.t.cdf", "scipy.stats.ttest_rel", "numpy.array" ] ]
Verduxo/FreeGames
[ "e68bd3d2264b7f8c61af94889ceb5c682311efcc" ]
[ "FreeGames.py" ]
[ "import requests\nimport pandas as pd\n\ndef save(url):\n jsonF = open('db.json','w+')\n r= requests.get(url)\n jsonF.write(r.text)\n df = pd.read_json('db.json')\n df.to_csv (r'OpenWithLibreOffice.csv', index = None)\n \n \n \ndef todos():\n url = ' https://www.freetogame.com/api/games'\n seVienenCositas = 'https://www.freetogame.com/api/comingsoon'\n\n save(url)\n\n\n\ndef custom():\n try:\n plataforma = str(input('''\n╔══════════════════════════════════╗\n║ introduzca la plataforma deseada ║\n╠══════════════════════════════════╣\n║ pc[default] ║\n║ browser ║\n║ all ║\n╚══════════════════════════════════╝\n\n> '''))\n if plataforma == '':\n plataforma = 'pc'\n except ValueError:\n plataforma = 'pc'\n print('\\ndebido a un fallo, se escogera la opcion por defecto(pc)') \n \n \n tags = '''\n╔══════════════════════════════════════════════════════════════════════════╗\n║ categorias ║\n╠══════════════════════════════════════════════════════════════════════════╣\n║ all[default] ║\n║ all mmorpg shooter strategy moba racing ║\n║ sports social sandbox open-world survival pvp ║\n║ pve pixel voxel zombie tank turn-based ║\n║ space third-Person sailing top-down permadeath first-person ║\n║ mmofps 3d 2d sci-fi low-spec battle-royale ║\n║ anime fantasy fighting action-rpg flight ║\n║ military martial-arts mmorts horror tower-defense ║\n║ ║\n╚══════════════════════════════════════════════════════════════════════════╝\n'''\n\n print(tags+'\\n')\n try:\n tag = str(input('''\n\n╔══════════════════════════════════════════════════════════════════════════╗\n║ Elija las categorias.Puedes escoger más de una, separalas con un espacio ║\n╚══════════════════════════════════════════════════════════════════════════╝\n\n> '''))\n if not tag == 'all' and tag != '':\n tagFull = '&category='+tag\n else:\n tagFull = ''\n\n except ValueError:\n tagFull = ''\n print('\\ndebido a un fallo, se escogerá la opción por defecto(pc)') \n\n\n try:\n ordenar = str(input('''\n \n╔═════════════════════════════════════╗\n║ ¿como desea ordenar los resultados? ║\n╠═════════════════════════════════════╣\n║ popularidad [default] ║\n║ fecha de salida ║\n║ alfabeticamente ║\n║ relevancia ║\n╚═════════════════════════════════════╝\n\n> '''))\n if ordenar == 'popularidad':\n ordenar = 'popularity'\n \n elif ordenar == 'fecha de salida':\n ordenar = 'release-date'\n \n elif ordenar == 'alfabeticamente':\n ordenar = 'alphabetical'\n elif ordenar == 'relevance':\n ordenar = 'relevancia'\n else:\n ordenar = 'popularity'\n except ValueError:\n ordenar = 'popularity'\n print('\\ndebido a un fallo, se escogerá la opción por defecto(popularidad)') \n\n\n url= 'https://www.freetogame.com/api/games?platform='+plataforma+'&sort-by='+ordenar + tagFull\n \n #seVienenCositas = 'https://www.freetogame.com/api/comingsoon'\n\n r= requests.get(url)\n save(url)\n\n\ndef bannerDef():\n try: \n\n banner = int(input('''\n \n ╔════════════════════════════╗\n ║ Juegos Gratis ║\n ╠════════════════════════════╣\n ║ [1]todos los juegos gratis ║\n ║ [2]búsqueda personalizada ║\n ╚════════════════════════════╝\n\n > '''))\n if banner == 1:\n todos()\n elif banner == 2:\n custom()\n else:\n print('introduzca una respuesta valida')\n bannerDef()\n except ValueError:\n print('Introduzca una opción válida')\n bannerDef()\nbannerDef()" ]
[ [ "pandas.read_json" ] ]
AliDaVinci/ConnectFour
[ "82060777784616d11256a06e44222e91dd7baecf" ]
[ "Connect4.py" ]
[ "import numpy as np\nimport pygame\nimport sys\nimport math\n\nBLUE = (0,0,255)\nBLACK = (0,0,0,)\nRED = (255,0,0)\nYELLOW = (255,255,0)\n\nROW_COUNT = 6\nCOLUMN_COUNT = 7\n\ndef create_board():\n\tboard = np.zeros((ROW_COUNT,COLUMN_COUNT))\n\treturn board\n\ndef drop_piece(board, row, col, piece):\n\tboard[row][col] = piece\n\ndef is_valid_location(board, col):\n\treturn board[ROW_COUNT-1][col] == 0\n\ndef get_next_open_row(board, col):\n\tfor r in range(ROW_COUNT):\n\t\tif board[r][col] == 0:\n\t\t\treturn r\n\ndef print_board(board):\n\tprint(np.flip(board, 0))\n\ndef winning_move(board, piece):\n\t# Check horizontal locations for win \n\tfor c in range(COLUMN_COUNT-3):\n\t\tfor r in range(ROW_COUNT):\n\t\t\tif board[r][c] == piece and board[r][c+1] == piece and board [r][c+2] == piece and board[r][c+3] == piece:\n\t\t\t\treturn True\n\n\t# Check vertical locations for win\n\tfor c in range(COLUMN_COUNT):\n\t\tfor r in range(ROW_COUNT-3):\n\t\t\tif board[r][c] == piece and board[r+1][c] == piece and board [r+2][c] == piece and board[r+3][c] == piece:\n\t\t\t\treturn True\t\n\n\t# Check positively sloped diaganols\n\tfor c in range(COLUMN_COUNT-3):\n\t\tfor r in range(ROW_COUNT-3):\n\t\t\tif board[r][c] == piece and board[r+1][c+1] == piece and board [r+2][c+2] == piece and board[r+3][c+3] == piece:\n\t\t\t\treturn True\t\t\n\n\t# Check negatively sloped diaganols\n\tfor c in range(COLUMN_COUNT-3):\n\t\tfor r in range(3, ROW_COUNT):\n\t\t\tif board[r][c] == piece and board[r-1][c+1] == piece and board [r-2][c+2] == piece and board[r-3][c+3] == piece:\n\t\t\t\treturn True\n\ndef draw_board(board):\n\tfor c in range(COLUMN_COUNT):\n\t\tfor r in range(ROW_COUNT):\n\t\t\tpygame.draw.rect(screen, BLUE, (c*SQUARESIZE, r*SQUARESIZE+SQUARESIZE, SQUARESIZE,SQUARESIZE))\n\t\t\tpygame.draw.circle(screen, BLACK, (int(c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), RADIUS)\n\t\t\t\n\t\tfor c in range(COLUMN_COUNT):\n\t\t\tfor r in range(ROW_COUNT):\n\t\t\t\tif board[r][c] == 1:\n\t\t\t\t\tpygame.draw.circle(screen, RED, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)\n\t\t\t\telif board[r][c] == 2:\n\t\t\t\t\tpygame.draw.circle(screen, YELLOW, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)\n\t\tpygame.display.update()\n\n\nboard = create_board()\nprint_board(board)\ngame_over = False\nturn = 0\n\npygame.init()\n\nSQUARESIZE = 100\n\nwidth = COLUMN_COUNT * SQUARESIZE\nheight = (ROW_COUNT+1) * SQUARESIZE\n\nsize = (width, height)\n\nRADIUS = int(SQUARESIZE/2 - 5)\n\nscreen = pygame.display.set_mode(size)\ndraw_board(board)\npygame.display.update()\n\nmyfont = pygame.font.SysFont(\"monospace\", 50)\n\nwhile not game_over: \n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\n\t\t\tif event.type == pygame.MOUSEMOTION:\n\t\t\t\tpygame.draw.rect(screen, BLACK, (0,0, width, SQUARESIZE))\n\t\t\t\tposx = event.pos[0]\n\t\t\t\tif turn == 0:\n\t\t\t\t\tpygame.draw.circle(screen,RED,(posx, int(SQUARESIZE/2)), RADIUS)\n\t\t\t\telse:\n\t\t\t\t\tpygame.draw.circle(screen,YELLOW,(posx, int(SQUARESIZE/2)), RADIUS)\n\t\t\tpygame.display.update()\n\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tpygame.draw.rect(screen, BLACK, (0,0, width, SQUARESIZE))\n\t\t\t\t# print(event.pos)\n\t\t\t\t# Ask for Player 1 Input\n\t\t\t\tif turn == 0:\n\t\t\t\t\tposx = event.pos[0]\n\t\t\t\t\tcol = int(math.floor(posx/SQUARESIZE))\n\n\t\t\t\t\tif is_valid_location(board, 
col):\n\t\t\t\t\t\trow = get_next_open_row(board, col)\n\t\t\t\t\t\tdrop_piece(board, row, col, 1)\n\n\t\t\t\t\t\tif winning_move(board, 1):\n\t\t\t\t\t\t\tlabel = myfont.render(\"Player 1 is Victorious!\", 1, RED)\n\t\t\t\t\t\t\tscreen.blit(label, (10,10))\n\t\t\t\t\t\t\tgame_over = True\n\n\n\t\t\t\t# Ask for Player 2 Input\n\t\t\t\telse:\n\t\t\t\t\tcol = posx = event.pos[0]\n\t\t\t\t\tcol = int(math.floor(posx/SQUARESIZE))\n\n\t\t\t\t\tif is_valid_location(board, col):\n\t\t\t\t\t\trow = get_next_open_row(board, col)\n\t\t\t\t\t\tdrop_piece(board, row, col, 2)\n\n\t\t\t\t\t\tif winning_move(board, 2):\n\t\t\t\t\t\t\tlabel = myfont.render(\"Player 2 is Victorious!\", 1, YELLOW)\n\t\t\t\t\t\t\tscreen.blit(label, (10,10))\n\t\t\t\t\t\t\tgame_over = True\n\n\t\t\t\tprint_board(board)\n\t\t\t\tdraw_board(board)\t\t\n\n\t\t\t\tturn += 1\n\t\t\t\tturn = turn % 2\n\n\t\t\t\tif game_over:\n\t\t\t\t\tpygame.time.wait(6000)\n" ]
[ [ "numpy.flip", "numpy.zeros" ] ]
luispedro/milksets
[ "84fc8cba4d4a87acf573ce562cd065b0ee37fadd" ]
[ "milksets/seeds/seeds.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (C) 2012, Luis Pedro Coelho <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import division\nimport numpy as np\nfrom os.path import dirname\nfrom ..vtypes import continuous\nfrom ..utils import standard_properties, standard_classification_loader\n\n__all__ = ['load'] + standard_properties\n\nname = 'seeds'\nshort_name = 'Seeds'\nlong_name = 'Seeds Flower Data Set'\nreference = '''\\\nM. Charytanowicz, J. Niewczas, P. Kulczycki, P.A. Kowalski, S. Lukasik, S. Zak,\n'A Complete Gradient Clustering Algorithm for Features Analysis of X-ray\nImages', in: Information Technologies in Biomedicine, Ewa Pietka, Jacek Kawa\n(eds.), Springer-Verlag, Berlin-Heidelberg, 2010, pp. 15-24.\n'''\nurl = 'http://archive.ics.uci.edu/ml/datasets/seeds'\ndata_source = 'UCI'\nlabel_names = ['Kama', 'Rosa', 'Canadian']\n\nmissing_values = False\nvalue_types = [\n continuous('area'), \n continuous('perimeter'),\n continuous('compactness'),\n continuous('length of kernel'),\n continuous('width of kernel'),\n continuous('asymmetry coefficien'),\n continuous('length of kernel groove'),\n ]\n\n@standard_classification_loader(name)\ndef load(force_contiguous=True):\n from bz2 import BZ2File\n base = dirname(__file__) + '/data/'\n data = np.loadtxt(base+'seeds_dataset.txt.gz')\n features = data[:,:-1]\n labels = data[:,-1]\n labels -= 1\n labels = labels.astype(int)\n if force_contiguous:\n features = features.copy()\n labels = labels.copy()\n return features, labels\n\n" ]
[ [ "numpy.loadtxt" ] ]
dksifoua/NMT
[ "a651d5f957868ab4879d028060fdbec3e09263cb" ]
[ "scripts/evaluate.py" ]
[ "import os\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport torch\nfrom nmt.config.global_config import GlobalConfig\nfrom nmt.config.train_config import TrainConfig\nfrom nmt.config.dataset_config import DatasetConfig\nfrom nmt.processing.processing import load_dataset, load_field\nfrom nmt.train.trainer import Trainer\nfrom nmt.train.train_utils import count_parameters\nfrom nmt.utils.logger import Logger\nfrom nmt.utils.utils import seed_everything\nfrom scripts.train import init_seq_to_seq_lstm_model, init_seq_to_seq_bi_lstm_model, \\\n init_seq_to_seq_luong_attn_model, init_seq_to_seq_badhanau_attn_model\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Train a model')\n parser.add_argument('--model', action='store', type=str, required=True,\n help='The model name (SeqToSeqLSTM, SeqToSeqBiLSTM, SeqToSeqLuongAttentionLSTM, '\n 'SeqToSeqBadhanauAttentionLSTM).')\n parser.add_argument('--src_lang', action='store', type=str, default=DatasetConfig.SRC_LANG,\n help=f'The source language. Default: {DatasetConfig.SRC_LANG}.')\n parser.add_argument('--dest_lang', action='store', type=str, default=DatasetConfig.DEST_LANG,\n help=f'The destination language. Default: {DatasetConfig.DEST_LANG}.')\n args = parser.parse_args()\n seed_everything(GlobalConfig.SEED)\n logger = Logger(name=f'Evaluate{args.model}')\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n logger.info(f'Device: {device}')\n\n module = getattr(__import__('nmt.model'), 'model')\n\n logger.info('Load Fields')\n src_field = load_field(filename=f'{args.src_lang}')\n dest_field = load_field(filename=f'{args.dest_lang}')\n\n logger.info('Init the model')\n if args.model == 'SeqToSeqLSTM':\n model = init_seq_to_seq_lstm_model(_module=module, _src_vocab_size=len(src_field.vocab),\n _dest_vocab_size=len(dest_field.vocab), _device=device)\n elif args.model == 'SeqToSeqBiLSTM':\n model = init_seq_to_seq_bi_lstm_model(_module=module, _src_vocab_size=len(src_field.vocab),\n _dest_vocab_size=len(dest_field.vocab), _device=device)\n elif args.model == 'SeqToSeqLuongAttentionLSTM':\n model = init_seq_to_seq_luong_attn_model(_module=module, _src_vocab_size=len(src_field.vocab),\n _dest_vocab_size=len(dest_field.vocab), _device=device,\n _pad_index=dest_field.vocab.stoi[dest_field.pad_token])\n elif args.model == 'SeqToSeqBadhanauAttentionLSTM':\n model = init_seq_to_seq_badhanau_attn_model(_module=module, _src_vocab_size=len(src_field.vocab),\n _dest_vocab_size=len(dest_field.vocab), _device=device,\n _pad_index=dest_field.vocab.stoi[dest_field.pad_token])\n else:\n raise NotImplementedError(f'The {args.model} has not been implemented!')\n model.to(device)\n logger.info(str(model))\n logger.info(f'Number of parameters of the model: {count_parameters(model):,}')\n\n logger.info('Load datasets')\n train_dataset = load_dataset(filename='train', src_field=src_field, dest_field=dest_field, logger=logger)\n valid_dataset = load_dataset(filename='valid', src_field=src_field, dest_field=dest_field, logger=logger)\n test_dataset = load_dataset(filename='test', src_field=src_field, dest_field=dest_field, logger=logger)\n\n logger.info('Init trainer')\n trainer = Trainer(model=model, optimizer=None, criterion=None, src_field=src_field, dest_field=dest_field,\n train_data=train_dataset, valid_data=valid_dataset, test_data=test_dataset, logger=logger)\n\n logger.info('Build data iterators')\n 
trainer.build_data_iterator(batch_size=TrainConfig.BATCH_SIZE, device=device)\n\n logger.info('Start the model evaluation...')\n attention = trainer.model.__class__.__name__.__contains__('Attention')\n for dataset_name in ['valid', 'test']:\n indexes = np.random.choice(len(getattr(trainer, f'{dataset_name}_data').examples), size=20, replace=False)\n for beam_size in [1, 5]:\n hypotheses, references, sources, bleu4, pred_logps, attention_weights = trainer.evaluate(\n dataset_name='valid', beam_size=beam_size, max_len=DatasetConfig.MAX_LEN, device=device\n )\n logger.info(f'BLEU-4: {bleu4*100:.3f}% on {dataset_name} dataset with beam_size={beam_size}')\n for index in indexes:\n logger.info(f'Source: {\" \".join(sources[index])}')\n logger.info(f'Ground truth translation: {\" \".join(references[index])}')\n logger.info(f'Predicted translation: {\" \".join(hypotheses[index])}')\n logger.info('='*100)\n if attention:\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111)\n cax = ax.matshow(attention_weights[index])\n fig.colorbar(cax)\n ax.tick_params(labelsize=15)\n ax.set_xticklabels(sources[index], rotation=45)\n ax.set_yticklabels(hypotheses[index])\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.savefig(os.path.join(GlobalConfig.IMG_PATH,\n f'{args.model}_{dataset_name}_index_{index}_beam_size_{beam_size}.png'))\n plt.show()\n # TODO\n # Error analysis\n" ]
[ [ "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.show", "torch.cuda.is_available", "matplotlib.pyplot.figure" ] ]
luwangg/digital_rf
[ "e5bc28606af2d5071c1fd1561595f816ff335085" ]
[ "python/tools/thor.py" ]
[ "#!python\n# ----------------------------------------------------------------------------\n# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)\n# All rights reserved.\n#\n# Distributed under the terms of the BSD 3-clause license.\n#\n# The full license is in the LICENSE file, distributed with this software.\n# ----------------------------------------------------------------------------\n\"\"\"Record data from synchronized USRPs in Digital RF format.\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport math\nimport os\nimport re\nimport sys\nimport time\nfrom ast import literal_eval\nfrom datetime import datetime, timedelta\nfrom fractions import Fraction\nfrom itertools import chain, cycle, islice, repeat\nfrom subprocess import call\nfrom textwrap import TextWrapper, dedent, fill\n\nimport digital_rf as drf\nimport gr_digital_rf as gr_drf\nimport numpy as np\nimport pytz\nfrom gnuradio import filter as grfilter\nfrom gnuradio import blocks, gr, uhd\n\n\ndef equiripple_lpf(\n cutoff=0.45, transition_width=0.1, attenuation=80, pass_ripple=None,\n):\n \"\"\"Get taps for an equiripple low-pass filter.\n\n All frequencies given must be normalized in the range [0, 1], with 1\n corresponding to the Nyquist frequency (Fs/2).\n\n Parameters\n ----------\n\n cutoff : float\n Normalized cutoff frequency (beginning of transition band).\n\n transition_width : float\n Normalized width (in frequency) of transition region from pass band to\n stop band.\n\n attenuation : float\n Attenuation of the stop band in dB.\n\n pass_ripple : float | None\n Maximum ripple in the pass band in dB. If None, the attenuation value\n is used.\n\n\n Returns\n -------\n\n taps : array_like\n Type I (even order) FIR low-pass filter taps meeting the given\n requirements.\n\n \"\"\"\n if pass_ripple is None:\n pass_ripple = attenuation\n\n if cutoff <= 0:\n errstr = 'Cutoff ({0}) must be strictly greater than zero.'\n raise ValueError(errstr.format(cutoff))\n\n if transition_width <= 0:\n errstr = 'Transition width ({0}) must be strictly greater than zero.'\n raise ValueError(errstr.format(transition_width))\n\n if cutoff + transition_width >= 1:\n errstr = (\n 'Cutoff ({0}) + transition width ({1}) must be strictly less than'\n ' one, but it is {2}.'\n ).format(cutoff, transition_width, cutoff + transition_width)\n raise ValueError(errstr)\n\n # pm_remez arguments\n bands = [0, cutoff, cutoff + transition_width, 1]\n ampl = [1, 1, 0, 0]\n error_weight = [10**((pass_ripple - attenuation) / 20.0), 1]\n\n # get estimate for the filter order (Oppenheim + Schafer 2nd ed, 7.104)\n M = (((attenuation + pass_ripple) / 2.0 - 13)\n / 2.324 / (np.pi * transition_width))\n # round up to nearest even-order (Type I) filter\n M = int(np.ceil(M / 2.0)) * 2\n\n for attempts in range(20):\n # get taps for order M\n try:\n taps = np.asarray(grfilter.pm_remez(\n order=M, bands=bands, ampl=ampl, error_weight=error_weight,\n ))\n except RuntimeError:\n M = M + 2\n continue\n\n # calculate frequency response and get error from ideal\n nfft = 16 * len(taps)\n h = np.fft.fft(taps, nfft)\n w = np.fft.fftfreq(nfft, 0.5)\n\n passband = h[(np.abs(w) >= bands[0]) & (np.abs(w) <= bands[1])]\n stopband = h[(np.abs(w) >= bands[2]) & (np.abs(w) <= bands[3])]\n\n act_ripple = -20*np.log10(np.max(np.abs(ampl[0] - np.abs(passband))))\n act_atten = -20*np.log10(np.max(np.abs(ampl[2] - np.abs(stopband))))\n\n if act_ripple >= pass_ripple and act_atten >= attenuation:\n break\n else:\n M = M + 2\n else:\n 
errstr = (\n 'Could not calculate equiripple filter that meets requirements'\n 'after {0} attempts (final order {1}).'\n )\n raise RuntimeError(errstr.format(attempts, M))\n\n return taps\n\n\nclass Thor(object):\n \"\"\"Record data from synchronized USRPs in DigitalRF format.\"\"\"\n\n def __init__(\n self, datadir, verbose=True,\n # mainboard group (num: len of mboards)\n mboards=[], subdevs=['A:A'], clock_rates=[None],\n clock_sources=[''], time_sources=[''],\n # receiver group (apply to all)\n samplerate=1e6,\n dev_args=['recv_buff_size=100000000', 'num_recv_frames=512'],\n stream_args=[], tune_args=[],\n time_sync=True, wait_for_lock=True,\n stop_on_dropped=False, realtime=False, test_settings=True,\n # receiver channel group (num: matching channels from mboards/subdevs)\n centerfreqs=[100e6],\n lo_offsets=[0], lo_sources=[''], lo_exports=[None],\n dc_offsets=[False], iq_balances=[None],\n gains=[0], bandwidths=[0], antennas=[''],\n # output channel group (num: len of channel_names)\n channel_names=['ch0'], channels=[None], ch_samplerates=[None],\n ch_centerfreqs=[False], ch_scalings=[1.0], ch_nsubchannels=[1],\n ch_lpf_cutoffs=[0.9], ch_lpf_transition_widths=[0.2],\n ch_lpf_attenuations=[80.0], ch_lpf_pass_ripples=[None],\n ch_out_types=[None],\n # digital_rf group (apply to all)\n file_cadence_ms=1000, subdir_cadence_s=3600, metadata={}, uuid=None,\n ):\n options = locals()\n del options['self']\n op = self._parse_options(**options)\n self.op = op\n\n # test usrp device settings, release device when done\n if op.test_settings:\n if op.verbose:\n print('Initialization: testing device settings.')\n u = self._usrp_setup()\n del u\n\n # finalize options (for settings that depend on USRP setup)\n self._finalize_options()\n\n @staticmethod\n def _parse_options(**kwargs):\n \"\"\"Put all keyword options in a namespace and normalize them.\"\"\"\n op = argparse.Namespace(**kwargs)\n\n # check that subdevice specifications are unique per-mainboard\n for sd in op.subdevs:\n sds = sd.split()\n if len(set(sds)) != len(sds):\n errstr = (\n 'Invalid subdevice specification: \"{0}\". 
'\n 'Each subdevice specification for a given mainboard must '\n 'be unique.'\n )\n raise ValueError(errstr.format(sd))\n\n # get USRP cpu_format based on output type and decimation requirements\n processing_required = (\n any(sr is not None for sr in op.ch_samplerates) or\n any(cf is not False for cf in op.ch_centerfreqs) or\n any(s != 1 for s in op.ch_scalings) or\n any(nsch != 1 for nsch in op.ch_nsubchannels)\n )\n if (all(ot is None or ot == 'sc16' for ot in op.ch_out_types)\n and not processing_required):\n # with only sc16 output and no processing, can use sc16 as cpu\n # format and disable conversion\n op.cpu_format = 'sc16'\n op.ch_out_specs = [dict(\n convert=None,\n convert_kwargs=None,\n dtype=np.dtype([(str('r'), np.int16), (str('i'), np.int16)]),\n name='sc16',\n )]\n else:\n op.cpu_format = 'fc32'\n # get full specification for output types\n supported_out_types = {\n 'sc8': dict(\n convert='float_to_char',\n convert_kwargs=dict(vlen=2, scale=float(2**7-1)),\n dtype=np.dtype([(str('r'), np.int8), (str('i'), np.int8)]),\n name='sc8',\n ),\n 'sc16': dict(\n convert='float_to_short',\n convert_kwargs=dict(vlen=2, scale=float(2**15-1)),\n dtype=np.dtype(\n [(str('r'), np.int16), (str('i'), np.int16)]\n ),\n name='sc16',\n ),\n 'sc32': dict(\n convert='float_to_int',\n convert_kwargs=dict(vlen=2, scale=float(2**31-1)),\n dtype=np.dtype(\n [(str('r'), np.int32), (str('i'), np.int32)]\n ),\n name='sc32',\n ),\n 'fc32': dict(\n convert=None,\n convert_kwargs=None,\n dtype=np.dtype('complex64'),\n name='fc32',\n ),\n }\n supported_out_types[None] = supported_out_types['fc32']\n type_dicts = []\n for ot in op.ch_out_types:\n try:\n type_dict = supported_out_types[ot]\n except KeyError:\n errstr = (\n 'Output type {0} is not supported. Must be one of {1}.'\n ).format(ot, list(supported_out_types.keys()))\n raise ValueError(errstr)\n else:\n type_dicts.append(type_dict)\n op.ch_out_specs = type_dicts\n # replace out_types to fill in None values with type name\n op.ch_out_types = [os['name'] for os in op.ch_out_specs]\n\n # repeat mainboard arguments as necessary\n op.nmboards = len(op.mboards) if len(op.mboards) > 0 else 1\n for mb_arg in (\n 'subdevs', 'clock_rates', 'clock_sources', 'time_sources',\n ):\n val = getattr(op, mb_arg)\n mbval = list(islice(cycle(val), 0, op.nmboards))\n setattr(op, mb_arg, mbval)\n\n # get number of receiver channels by total number of subdevices over\n # all mainboards\n op.mboards_bychan = []\n op.subdevs_bychan = []\n op.mboardnum_bychan = []\n mboards = op.mboards if op.mboards else ['default']\n for mbnum, (mb, sd) in enumerate(zip(mboards, op.subdevs)):\n sds = sd.split()\n mbs = list(repeat(mb, len(sds)))\n mbnums = list(repeat(mbnum, len(sds)))\n op.mboards_bychan.extend(mbs)\n op.subdevs_bychan.extend(sds)\n op.mboardnum_bychan.extend(mbnums)\n\n # repeat receiver channel arguments as necessary\n op.nrchs = len(op.subdevs_bychan)\n for rch_arg in (\n 'antennas', 'bandwidths', 'centerfreqs', 'dc_offsets',\n 'iq_balances', 'lo_offsets', 'lo_sources', 'lo_exports', 'gains',\n ):\n val = getattr(op, rch_arg)\n rval = list(islice(cycle(val), 0, op.nrchs))\n setattr(op, rch_arg, rval)\n\n # repeat output channel arguments as necessary\n op.nochs = len(op.channel_names)\n for och_arg in (\n 'channels', 'ch_centerfreqs', 'ch_lpf_attenuations',\n 'ch_lpf_cutoffs', 'ch_lpf_pass_ripples',\n 'ch_lpf_transition_widths', 'ch_nsubchannels', 'ch_out_specs',\n 'ch_out_types', 'ch_samplerates', 'ch_scalings',\n ):\n val = getattr(op, och_arg)\n rval = 
list(islice(cycle(val), 0, op.nochs))\n setattr(op, och_arg, rval)\n\n # fill in unspecified (None) channels values\n rchannels = set(range(op.nrchs))\n ochannels = set(c for c in op.channels if c is not None)\n if not ochannels.issubset(rchannels):\n errstr = (\n 'Invalid channel specification. Output channel uses'\n ' non-existent receiver channel: {0}.'\n )\n raise ValueError(errstr.format(list(ochannels - rchannels)))\n avail = sorted(rchannels - ochannels)\n try:\n op.channels = [\n c if c is not None else avail.pop(0) for c in op.channels\n ]\n except IndexError:\n errstr = (\n 'No remaining receiver channels left to assign to unspecified'\n ' (None) output channel. You probably need to explicitly'\n ' specify the receiver channels to output.'\n )\n raise ValueError(errstr)\n unused_rchs = set(range(op.nrchs)) - set(op.channels)\n if unused_rchs:\n errstr = (\n 'Receiver channels {0} are unused in the output. Either'\n ' remove them from the mainboard/subdevice specification or'\n ' correct the output channel specification.'\n )\n raise ValueError(errstr.format(unused_rchs))\n\n # copy desired centerfreq from receiver to output channel if requested\n op.ch_centerfreqs = [\n op.centerfreqs[rch] if f in (None, True) else f\n for f, rch in zip(op.ch_centerfreqs, op.channels)\n ]\n\n # create device_addr string to identify the requested device(s)\n op.mboard_strs = []\n for n, mb in enumerate(op.mboards):\n if re.match(r'[^0-9]+=.+', mb):\n idtype, mb = mb.split('=')\n elif re.match(\n r'[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}', mb\n ):\n idtype = 'addr'\n elif (\n re.match(r'usrp[123]', mb) or re.match(r'b2[01]0', mb)\n or re.match(r'x3[01]0', mb)\n ):\n idtype = 'type'\n elif re.match(r'[0-9A-Fa-f]{1,}', mb):\n idtype = 'serial'\n else:\n idtype = 'name'\n if len(op.mboards) == 1:\n # do not use identifier numbering if only using one mainboard\n s = '{type}={mb}'.format(type=idtype, mb=mb.strip())\n else:\n s = '{type}{n}={mb}'.format(type=idtype, n=n, mb=mb.strip())\n op.mboard_strs.append(s)\n\n if op.verbose:\n opstr = dedent('''\\\n Main boards: {mboard_strs}\n Subdevices: {subdevs}\n Clock rates: {clock_rates}\n Clock sources: {clock_sources}\n Time sources: {time_sources}\n Sample rate: {samplerate}\n Device arguments: {dev_args}\n Stream arguments: {stream_args}\n Tune arguments: {tune_args}\n Antenna: {antennas}\n Bandwidth: {bandwidths}\n Frequency: {centerfreqs}\n LO frequency offset: {lo_offsets}\n LO source: {lo_sources}\n LO export: {lo_exports}\n Gain: {gains}\n DC offset: {dc_offsets}\n IQ balance: {iq_balances}\n Output channels: {channels}\n Output channel names: {channel_names}\n Output sample rate: {ch_samplerates}\n Output frequency: {ch_centerfreqs}\n Output scaling: {ch_scalings}\n Output subchannels: {ch_nsubchannels}\n Output type: {ch_out_types}\n Data dir: {datadir}\n Metadata: {metadata}\n UUID: {uuid}\n ''').strip().format(**op.__dict__)\n print(opstr)\n\n return op\n\n def _usrp_setup(self):\n \"\"\"Create, set up, and return USRP source object.\"\"\"\n op = self.op\n # create usrp source block\n op.otw_format = 'sc16'\n u = uhd.usrp_source(\n device_addr=','.join(chain(op.mboard_strs, op.dev_args)),\n stream_args=uhd.stream_args(\n cpu_format=op.cpu_format,\n otw_format=op.otw_format,\n channels=list(range(op.nrchs)),\n args=','.join(op.stream_args)\n ),\n )\n\n # set mainboard options\n for mb_num in range(op.nmboards):\n u.set_subdev_spec(op.subdevs[mb_num], mb_num)\n\n # set master clock rate\n clock_rate = op.clock_rates[mb_num]\n if 
clock_rate is not None:\n u.set_clock_rate(clock_rate, mb_num)\n op.clock_rates[mb_num] = u.get_clock_rate(mb_num)\n\n # set clock source\n clock_source = op.clock_sources[mb_num]\n if not clock_source and op.wait_for_lock:\n clock_source = 'external'\n if clock_source:\n try:\n u.set_clock_source(clock_source, mb_num)\n except RuntimeError:\n errstr = (\n \"Setting mainboard {0} clock_source to '{1}' failed.\"\n \" Must be one of {2}. If setting is valid, check that\"\n \" the source (REF) is operational.\"\n ).format(\n mb_num, clock_source, u.get_clock_sources(mb_num),\n )\n raise ValueError(errstr)\n op.clock_sources[mb_num] = u.get_clock_source(mb_num)\n\n # set time source\n time_source = op.time_sources[mb_num]\n if not time_source and op.time_sync:\n time_source = 'external'\n if time_source:\n try:\n u.set_time_source(time_source, mb_num)\n except RuntimeError:\n errstr = (\n \"Setting mainboard {0} time_source to '{1}' failed.\"\n \" Must be one of {2}. If setting is valid, check that\"\n \" the source (PPS) is operational.\"\n ).format(\n mb_num, time_source, u.get_time_sources(mb_num),\n )\n raise ValueError(errstr)\n op.time_sources[mb_num] = u.get_time_source(mb_num)\n\n # check for ref lock\n mbnums_with_ref = [\n mb_num for mb_num in range(op.nmboards)\n if 'ref_locked' in u.get_mboard_sensor_names(mb_num)\n ]\n if op.wait_for_lock and mbnums_with_ref:\n if op.verbose:\n sys.stdout.write('Waiting for reference lock...')\n sys.stdout.flush()\n timeout = 0\n if op.wait_for_lock is True:\n timeout_thresh = 30\n else:\n timeout_thresh = op.wait_for_lock\n while not all(\n u.get_mboard_sensor('ref_locked', mb_num).to_bool()\n for mb_num in mbnums_with_ref\n ):\n if op.verbose:\n sys.stdout.write('.')\n sys.stdout.flush()\n time.sleep(1)\n timeout += 1\n if timeout > timeout_thresh:\n if op.verbose:\n sys.stdout.write('failed\\n')\n sys.stdout.flush()\n unlocked_mbs = [\n mb_num for mb_num in mbnums_with_ref\n if u.get_mboard_sensor('ref_locked', mb_num).to_bool()\n ]\n errstr = (\n 'Failed to lock to 10 MHz reference on mainboards {0}.'\n ' To skip waiting for lock, set `wait_for_lock` to'\n ' False (pass --nolock on the command line).'\n ).format(unlocked_mbs)\n raise RuntimeError(errstr)\n if op.verbose:\n sys.stdout.write('locked\\n')\n sys.stdout.flush()\n\n # set global options\n # sample rate\n u.set_samp_rate(float(op.samplerate))\n # read back actual value\n samplerate = u.get_samp_rate()\n # calculate longdouble precision/rational sample rate\n # (integer division of clock rate)\n cr = u.get_clock_rate(0)\n srdec = int(round(cr / samplerate))\n samplerate_ld = np.longdouble(cr) / srdec\n op.samplerate = samplerate_ld\n op.samplerate_frac = Fraction(cr).limit_denominator() / srdec\n\n # set per-channel options\n # set command time so settings are synced\n COMMAND_DELAY = 0.2\n cmd_time = u.get_time_now() + uhd.time_spec(COMMAND_DELAY)\n u.set_command_time(cmd_time, uhd.ALL_MBOARDS)\n for ch_num in range(op.nrchs):\n # local oscillator sharing settings\n lo_source = op.lo_sources[ch_num]\n if lo_source:\n try:\n u.set_lo_source(lo_source, uhd.ALL_LOS, ch_num)\n except RuntimeError:\n errstr = (\n \"Unknown LO source option: '{0}'. 
Must be one of {1},\"\n \" or it may not be possible to set the LO source on\"\n \" this daughterboard.\"\n ).format(lo_source, u.get_lo_sources(uhd.ALL_LOS, ch_num))\n raise ValueError(errstr)\n lo_export = op.lo_exports[ch_num]\n if lo_export is not None:\n if not lo_source:\n errstr = (\n 'Channel {0}: must set an LO source in order to set'\n ' LO export.'\n ).format(ch_num)\n raise ValueError(errstr)\n u.set_lo_export_enabled(lo_export, uhd.ALL_LOS, ch_num)\n # center frequency and tuning offset\n tune_res = u.set_center_freq(\n uhd.tune_request(\n op.centerfreqs[ch_num], op.lo_offsets[ch_num],\n args=uhd.device_addr(','.join(op.tune_args)),\n ),\n ch_num,\n )\n # store actual values from tune result\n op.centerfreqs[ch_num] = (\n tune_res.actual_rf_freq - tune_res.actual_dsp_freq\n )\n op.lo_offsets[ch_num] = tune_res.actual_dsp_freq\n # dc offset\n dc_offset = op.dc_offsets[ch_num]\n if dc_offset is True:\n u.set_auto_dc_offset(True, ch_num)\n elif dc_offset is False:\n u.set_auto_dc_offset(False, ch_num)\n elif dc_offset is not None:\n u.set_auto_dc_offset(False, ch_num)\n u.set_dc_offset(dc_offset, ch_num)\n # iq balance\n iq_balance = op.iq_balances[ch_num]\n if iq_balance is True:\n u.set_auto_iq_balance(True, ch_num)\n elif iq_balance is False:\n u.set_auto_iq_balance(False, ch_num)\n elif iq_balance is not None:\n u.set_auto_iq_balance(False, ch_num)\n u.set_iq_balance(iq_balance, ch_num)\n # gain\n u.set_gain(op.gains[ch_num], ch_num)\n # bandwidth\n bw = op.bandwidths[ch_num]\n if bw:\n u.set_bandwidth(bw, ch_num)\n # antenna\n ant = op.antennas[ch_num]\n if ant:\n try:\n u.set_antenna(ant, ch_num)\n except RuntimeError:\n errstr = (\n \"Unknown RX antenna option: '{0}'. Must be one of {1}.\"\n ).format(ant, u.get_antennas(ch_num))\n raise ValueError(errstr)\n\n # commands are done, clear time\n u.clear_command_time(uhd.ALL_MBOARDS)\n time.sleep(COMMAND_DELAY)\n\n # read back actual channel settings\n for ch_num in range(op.nrchs):\n if op.lo_sources[ch_num]:\n op.lo_sources[ch_num] = u.get_lo_source(uhd.ALL_LOS, ch_num)\n if op.lo_exports[ch_num] is not None:\n op.lo_exports[ch_num] = u.get_lo_export_enabled(\n uhd.ALL_LOS, ch_num,\n )\n op.gains[ch_num] = u.get_gain(ch_num)\n op.bandwidths[ch_num] = u.get_bandwidth(chan=ch_num)\n op.antennas[ch_num] = u.get_antenna(chan=ch_num)\n\n if op.verbose:\n print('Using the following devices:')\n chinfostrs = [\n 'Motherboard: {mb_id} ({mb_addr}) | Daughterboard: {db_name}',\n 'Subdev: {sub} | Antenna: {ant} | Gain: {gain} | Rate: {sr}',\n 'Frequency: {freq:.3f} ({lo_off:+.3f}) | Bandwidth: {bw}',\n ]\n if any(op.lo_sources) or any(op.lo_exports):\n chinfostrs.append(\n 'LO source: {lo_source} | LO export: {lo_export}'\n )\n chinfo = '\\n'.join([' ' + l for l in chinfostrs])\n for ch_num in range(op.nrchs):\n header = '---- receiver channel {0} '.format(ch_num)\n header += '-' * (78 - len(header))\n print(header)\n usrpinfo = dict(u.get_usrp_info(chan=ch_num))\n info = {}\n info['mb_id'] = usrpinfo['mboard_id']\n mba = op.mboards_bychan[ch_num]\n if mba == 'default':\n mba = usrpinfo['mboard_serial']\n info['mb_addr'] = mba\n info['db_name'] = usrpinfo['rx_subdev_name']\n info['sub'] = op.subdevs_bychan[ch_num]\n info['ant'] = op.antennas[ch_num]\n info['bw'] = op.bandwidths[ch_num]\n info['freq'] = op.centerfreqs[ch_num]\n info['gain'] = op.gains[ch_num]\n info['lo_off'] = op.lo_offsets[ch_num]\n info['lo_source'] = op.lo_sources[ch_num]\n info['lo_export'] = op.lo_exports[ch_num]\n info['sr'] = op.samplerate\n 
print(chinfo.format(**info))\n print('-' * 78)\n\n return u\n\n def _finalize_options(self):\n op = self.op\n\n op.ch_samplerates_frac = []\n op.resampling_ratios = []\n op.resampling_filter_taps = []\n op.resampling_filter_delays = []\n op.channelizer_filter_taps = []\n op.channelizer_filter_delays = []\n for ko, (osr, nsc) in enumerate(\n zip(op.ch_samplerates, op.ch_nsubchannels)\n ):\n # get output sample rate fraction\n # (op.samplerate_frac final value is set in _usrp_setup\n # so can't get output sample rate until after that is done)\n if osr is None:\n ch_samplerate_frac = op.samplerate_frac\n else:\n ch_samplerate_frac = Fraction(osr).limit_denominator()\n op.ch_samplerates_frac.append(ch_samplerate_frac)\n\n # get resampling ratio\n ratio = ch_samplerate_frac / op.samplerate_frac\n op.resampling_ratios.append(ratio)\n\n # get resampling low-pass filter taps\n if ratio == 1:\n op.resampling_filter_taps.append(np.zeros(0))\n op.resampling_filter_delays.append(0)\n else:\n taps = equiripple_lpf(\n cutoff=float(op.ch_lpf_cutoffs[ko] * ratio),\n transition_width=float(\n op.ch_lpf_transition_widths[ko] * ratio\n ),\n attenuation=op.ch_lpf_attenuations[ko],\n pass_ripple=op.ch_lpf_pass_ripples[ko],\n )\n op.resampling_filter_taps.append(taps)\n op.resampling_filter_delays.append((len(taps) - 1) // 2)\n\n # get channelizer low-pass filter taps\n if nsc > 1:\n taps = equiripple_lpf(\n cutoff=(op.ch_lpf_cutoffs[ko] / nsc),\n transition_width=(op.ch_lpf_transition_widths[ko] / nsc),\n attenuation=op.ch_lpf_attenuations[ko],\n pass_ripple=op.ch_lpf_pass_ripples[ko],\n )\n op.channelizer_filter_taps.append(taps)\n op.channelizer_filter_delays.append((len(taps) - 1) // 2)\n else:\n op.channelizer_filter_taps.append(np.zeros(0))\n op.channelizer_filter_delays.append(0)\n\n def run(self, starttime=None, endtime=None, duration=None, period=10):\n op = self.op\n\n # window in seconds that we allow for setup time so that we don't\n # issue a start command that's in the past when the flowgraph starts\n SETUP_TIME = 10\n\n # print current time and NTP status\n if op.verbose and sys.platform.startswith('linux'):\n try:\n call(('timedatectl', 'status'))\n except OSError:\n # no timedatectl command, ignore\n pass\n\n # parse time arguments\n st = drf.util.parse_identifier_to_time(starttime)\n if st is not None:\n # find next suitable start time by cycle repeat period\n now = datetime.utcnow()\n now = now.replace(tzinfo=pytz.utc)\n soon = now + timedelta(seconds=SETUP_TIME)\n diff = max(soon - st, timedelta(0)).total_seconds()\n periods_until_next = (diff - 1) // period + 1\n st = st + timedelta(seconds=periods_until_next * period)\n\n if op.verbose:\n ststr = st.strftime('%a %b %d %H:%M:%S %Y')\n stts = (st - drf.util.epoch).total_seconds()\n print('Start time: {0} ({1})'.format(ststr, stts))\n\n et = drf.util.parse_identifier_to_time(endtime, ref_datetime=st)\n if et is not None:\n if op.verbose:\n etstr = et.strftime('%a %b %d %H:%M:%S %Y')\n etts = (et - drf.util.epoch).total_seconds()\n print('End time: {0} ({1})'.format(etstr, etts))\n\n if ((et < (pytz.utc.localize(datetime.utcnow())\n + timedelta(seconds=SETUP_TIME)))\n or (st is not None and et <= st)):\n raise ValueError('End time is before launch time!')\n\n if op.realtime:\n r = gr.enable_realtime_scheduling()\n\n if op.verbose:\n if r == gr.RT_OK:\n print('Realtime scheduling enabled')\n else:\n print('Note: failed to enable realtime scheduling')\n\n # create data directory so ringbuffer code can be started while waiting\n # to launch\n if 
not os.path.isdir(op.datadir):\n os.makedirs(op.datadir)\n\n # wait for the start time if it is not past\n while (st is not None) and (\n (st - pytz.utc.localize(datetime.utcnow())) >\n timedelta(seconds=SETUP_TIME)\n ):\n ttl = int((\n st - pytz.utc.localize(datetime.utcnow())\n ).total_seconds())\n if (ttl % 10) == 0:\n print('Standby {0} s remaining...'.format(ttl))\n sys.stdout.flush()\n time.sleep(1)\n\n # get UHD USRP source\n u = self._usrp_setup()\n\n # finalize options (for settings that depend on USRP setup)\n self._finalize_options()\n\n # force creation of the RX streamer ahead of time with a start/stop\n # (after setting time/clock sources, before setting the\n # device time)\n # this fixes timing with the B210\n u.start()\n # need to wait >0.1 s (constant in usrp_source_impl.c) for start/stop\n # to actually take effect, so sleep a bit, 0.5 s seems more reliable\n time.sleep(0.5)\n u.stop()\n time.sleep(0.2)\n\n # set device time\n tt = time.time()\n if op.time_sync:\n # wait until time 0.2 to 0.5 past full second, then latch\n # we have to trust NTP to be 0.2 s accurate\n while tt - math.floor(tt) < 0.2 or tt - math.floor(tt) > 0.3:\n time.sleep(0.01)\n tt = time.time()\n if op.verbose:\n print('Latching at ' + str(tt))\n # waits for the next pps to happen\n # (at time math.ceil(tt))\n # then sets the time for the subsequent pps\n # (at time math.ceil(tt) + 1.0)\n u.set_time_unknown_pps(uhd.time_spec(math.ceil(tt) + 1.0))\n else:\n u.set_time_now(uhd.time_spec(tt), uhd.ALL_MBOARDS)\n\n # set launch time\n # (at least 2 seconds out so USRP start time can be set properly and\n # there is time to set up flowgraph)\n if st is not None:\n lt = st\n else:\n now = pytz.utc.localize(datetime.utcnow())\n # launch on integer second by default for convenience (ceil + 2)\n lt = now.replace(microsecond=0) + timedelta(seconds=3)\n ltts = (lt - drf.util.epoch).total_seconds()\n # adjust launch time forward so it falls on an exact sample since epoch\n lt_rsamples = np.ceil(ltts * op.samplerate)\n ltts = lt_rsamples / op.samplerate\n lt = drf.util.sample_to_datetime(lt_rsamples, op.samplerate)\n if op.verbose:\n ltstr = lt.strftime('%a %b %d %H:%M:%S.%f %Y')\n print('Launch time: {0} ({1})'.format(ltstr, repr(ltts)))\n # command launch time\n ct_td = lt - drf.util.epoch\n ct_secs = ct_td.total_seconds() // 1.0\n ct_frac = ct_td.microseconds / 1000000.0\n u.set_start_time(\n uhd.time_spec(ct_secs) + uhd.time_spec(ct_frac)\n )\n\n # populate flowgraph one channel at a time\n fg = gr.top_block()\n for ko in range(op.nochs):\n # receiver channel number corresponding to this output channel\n kr = op.channels[ko]\n # mainboard number corresponding to this receiver's channel\n mbnum = op.mboardnum_bychan[kr]\n\n # output settings that get modified depending on processing\n ch_samplerate_frac = op.ch_samplerates_frac[ko]\n ch_centerfreq = op.ch_centerfreqs[ko]\n\n # make resampling filter blocks if necessary\n rs_ratio = op.resampling_ratios[ko]\n scaling = op.ch_scalings[ko]\n if rs_ratio != 1:\n rs_taps = op.resampling_filter_taps[ko]\n\n # integrate scaling into filter taps\n rs_taps *= scaling\n conv_scaling = 1.0\n\n # frequency shift filter taps to band-pass if necessary\n if ch_centerfreq is not False:\n f_shift = ch_centerfreq - op.centerfreqs[kr]\n phase_inc = 2*np.pi*f_shift/op.samplerate\n rotator = np.exp(phase_inc*1j*np.arange(len(rs_taps)))\n rs_taps = (rs_taps * rotator).astype('complex64')\n\n # create band-pass filter (complex taps)\n resampler = grfilter.rational_resampler_ccc(\n 
interpolation=rs_ratio.numerator,\n decimation=rs_ratio.denominator,\n taps=rs_taps.tolist(),\n )\n else:\n # create low-pass filter (float taps)\n resampler = grfilter.rational_resampler_ccf(\n interpolation=rs_ratio.numerator,\n decimation=rs_ratio.denominator,\n taps=rs_taps.tolist(),\n )\n\n # skip first samples to account for filter delay so first\n # sample going to output is first valid filtered sample\n # (skip is in terms of output samples, so fix rate)\n resampler_skiphead = blocks.skiphead(\n gr.sizeof_gr_complex,\n op.resampling_filter_delays[ko] // rs_ratio,\n )\n else:\n conv_scaling = scaling\n resampler = None\n\n # make frequency shift block if necessary\n if ch_centerfreq is not False:\n f_shift = ch_centerfreq - op.centerfreqs[kr]\n phase_inc = -2*np.pi*f_shift/ch_samplerate_frac\n rotator = blocks.rotator_cc(phase_inc)\n else:\n ch_centerfreq = op.centerfreqs[kr]\n rotator = None\n\n # make channelizer if necessary\n nsc = op.ch_nsubchannels[ko]\n if nsc > 1:\n sc_taps = op.channelizer_filter_taps[ko]\n\n # build a hierarchical block for the channelizer so output\n # is a vector of channels as expected by digital_rf\n channelizer = gr.hier_block2(\n 'lpf',\n gr.io_signature(1, 1, gr.sizeof_gr_complex),\n gr.io_signature(1, 1, nsc*gr.sizeof_gr_complex),\n )\n s2ss = blocks.stream_to_streams(gr.sizeof_gr_complex, nsc)\n filt = grfilter.pfb_channelizer_ccf(\n numchans=nsc, taps=sc_taps, oversample_rate=1.0,\n )\n s2v = blocks.streams_to_vector(gr.sizeof_gr_complex, nsc)\n channelizer.connect(channelizer, s2ss)\n for ksc in range(nsc):\n channelizer.connect((s2ss, ksc), (filt, ksc), (s2v, ksc))\n channelizer.connect(s2v, channelizer)\n\n # skip first samples to account for filter delay so first\n # sample going to output is first valid filtered sample\n # (skip is in terms of output samples, so fix rate)\n channelizer_skiphead = blocks.skiphead(\n gr.sizeof_gr_complex*nsc,\n op.channelizer_filter_delays[ko] // nsc,\n )\n\n # modify output settings accordingly\n ch_centerfreq = (\n ch_centerfreq\n + np.fft.fftfreq(nsc, 1 / float(ch_samplerate_frac))\n )\n ch_samplerate_frac = ch_samplerate_frac / nsc\n else:\n channelizer = None\n\n # make conversion block if necessary\n ot_dict = op.ch_out_specs[ko]\n converter = ot_dict['convert']\n if converter is not None:\n kw = ot_dict['convert_kwargs']\n # increase vector length of input due to channelizer\n kw['vlen'] *= nsc\n # incorporate any scaling into type conversion block\n kw['scale'] *= conv_scaling\n convert = getattr(blocks, converter)(**kw)\n elif conv_scaling != 1:\n convert = blocks.multiply_const_cc(conv_scaling, nsc)\n else:\n convert = None\n\n # get start sample\n ch_samplerate_ld = (\n np.longdouble(ch_samplerate_frac.numerator)\n / np.longdouble(ch_samplerate_frac.denominator)\n )\n start_sample = int(np.uint64(ltts * ch_samplerate_ld))\n\n # create digital RF sink\n dst = gr_drf.digital_rf_channel_sink(\n channel_dir=os.path.join(op.datadir, op.channel_names[ko]),\n dtype=op.ch_out_specs[ko]['dtype'],\n subdir_cadence_secs=op.subdir_cadence_s,\n file_cadence_millisecs=op.file_cadence_ms,\n sample_rate_numerator=ch_samplerate_frac.numerator,\n sample_rate_denominator=ch_samplerate_frac.denominator,\n start=start_sample,\n ignore_tags=False,\n is_complex=True,\n num_subchannels=nsc,\n uuid_str=op.uuid,\n center_frequencies=ch_centerfreq,\n metadata=dict(\n # receiver metadata for USRP\n receiver=dict(\n description='UHD USRP source using GNU Radio',\n info=dict(u.get_usrp_info(chan=kr)),\n 
antenna=op.antennas[kr],\n bandwidth=op.bandwidths[kr],\n center_freq=op.centerfreqs[kr],\n clock_rate=op.clock_rates[mbnum],\n clock_source=op.clock_sources[mbnum],\n dc_offset=op.dc_offsets[kr],\n gain=op.gains[kr],\n id=op.mboards_bychan[kr],\n iq_balance=op.iq_balances[kr],\n lo_export=op.lo_exports[kr],\n lo_offset=op.lo_offsets[kr],\n lo_source=op.lo_sources[kr],\n otw_format=op.otw_format,\n samp_rate=u.get_samp_rate(),\n stream_args=','.join(op.stream_args),\n subdev=op.subdevs_bychan[kr],\n time_source=op.time_sources[mbnum],\n ),\n processing=dict(\n channelizer_filter_taps=op.channelizer_filter_taps[ko],\n decimation=op.resampling_ratios[ko].denominator,\n interpolation=op.resampling_ratios[ko].numerator,\n resampling_filter_taps=op.resampling_filter_taps[ko],\n scaling=op.ch_scalings[ko],\n ),\n ),\n is_continuous=True,\n compression_level=0,\n checksum=False,\n marching_periods=True,\n stop_on_skipped=op.stop_on_dropped,\n debug=op.verbose,\n )\n\n connections = [(u, kr)]\n if resampler is not None:\n connections.append((resampler, 0))\n connections.append((resampler_skiphead, 0))\n if rotator is not None:\n connections.append((rotator, 0))\n if channelizer is not None:\n connections.append((channelizer, 0))\n connections.append((channelizer_skiphead, 0))\n if convert is not None:\n connections.append((convert, 0))\n connections.append((dst, 0))\n connections = tuple(connections)\n\n # make channel connections in flowgraph\n fg.connect(*connections)\n\n # start the flowgraph, samples should start at launch time\n fg.start()\n\n # check that we get samples after launch\n while not u.nitems_written(0):\n if (\n (pytz.utc.localize(datetime.utcnow())) - lt\n > timedelta(seconds=5)\n ):\n fg.stop()\n # need to wait for the flowgraph to clean up,\n # otherwise it won't exit\n fg.wait()\n errstr = (\n 'No samples streamed after launch. 
Exiting with failure.'\n )\n raise RuntimeError(errstr)\n time.sleep(1)\n\n # wait until end time or until flowgraph stops\n if et is None and duration is not None:\n et = lt + timedelta(seconds=duration)\n try:\n if et is None:\n fg.wait()\n else:\n # sleep until end time nears\n while(pytz.utc.localize(datetime.utcnow()) <\n et - timedelta(seconds=2)):\n time.sleep(1)\n else:\n # issue stream stop command at end time\n ct_td = et - drf.util.epoch\n ct_secs = ct_td.total_seconds() // 1.0\n ct_frac = ct_td.microseconds / 1000000.0\n u.set_command_time(\n (uhd.time_spec(ct_secs) + uhd.time_spec(ct_frac)),\n uhd.ALL_MBOARDS,\n )\n stop_enum = uhd.stream_cmd.STREAM_MODE_STOP_CONTINUOUS\n u.issue_stream_cmd(uhd.stream_cmd(stop_enum))\n u.clear_command_time(uhd.ALL_MBOARDS)\n # sleep until after end time\n time.sleep(2)\n except KeyboardInterrupt:\n # catch keyboard interrupt and simply exit\n pass\n fg.stop()\n # need to wait for the flowgraph to clean up, otherwise it won't exit\n fg.wait()\n print('done')\n sys.stdout.flush()\n\n\ndef evalint(s):\n \"\"\"Evaluate string to an integer.\"\"\"\n return int(eval(s, {}, {}))\n\n\ndef evalfloat(s):\n \"\"\"Evaluate string to a float.\"\"\"\n return float(eval(s, {}, {}))\n\n\ndef intstrtuple(s):\n \"\"\"Get (int, string) tuple from int:str strings.\"\"\"\n parts = [p.strip() for p in s.split(':', 1)]\n if len(parts) == 2:\n return int(parts[0]), parts[1]\n else:\n return None, parts[0]\n\n\ndef noneorstr(s):\n \"\"\"Turn empty or 'none' string to None.\"\"\"\n if s.lower() in ('', 'none'):\n return None\n else:\n return s\n\n\ndef noneorfloat(s):\n \"\"\"Turn empty or 'none' to None, else evaluate to float.\"\"\"\n if s.lower() in ('', 'none'):\n return None\n else:\n return evalfloat(s)\n\n\ndef noneorbool(s):\n \"\"\"Turn empty or 'none' string to None, all others to boolean.\"\"\"\n if s.lower() in ('', 'none'):\n return None\n elif s.lower() in ('true', 't', 'yes', 'y', '1'):\n return True\n else:\n return False\n\n\ndef noneorboolorfloat(s):\n \"\"\"Turn empty or 'none' to None, else evaluate to a boolean or float.\"\"\"\n if s.lower() in ('', 'none'):\n return None\n elif s.lower() in ('auto', 'true', 't', 'yes', 'y'):\n return True\n elif s.lower() in ('false', 'f', 'no', 'n'):\n return False\n else:\n return evalfloat(s)\n\n\ndef noneorboolorcomplex(s):\n \"\"\"Turn empty or 'none' to None, else evaluate to a boolean or complex.\"\"\"\n if s.lower() in ('', 'none'):\n return None\n elif s.lower() in ('auto', 'true', 't', 'yes', 'y'):\n return True\n elif s.lower() in ('false', 'f', 'no', 'n'):\n return False\n else:\n return complex(eval(s, {}, {}))\n\n\nclass Extend(argparse.Action):\n \"\"\"Action to split comma-separated arguments and add to a list.\"\"\"\n\n def __init__(self, option_strings, dest, type=None, **kwargs):\n if type is not None:\n itemtype = type\n else:\n def itemtype(s):\n return s\n\n def split_string_and_cast(s):\n return [itemtype(a.strip()) for a in s.strip().split(',')]\n\n super(Extend, self).__init__(\n option_strings, dest, type=split_string_and_cast, **kwargs\n )\n\n def __call__(self, parser, namespace, values, option_string=None):\n cur_list = getattr(namespace, self.dest, [])\n if cur_list is None:\n cur_list = []\n cur_list.extend(values)\n setattr(namespace, self.dest, cur_list)\n\n\ndef _add_dir_group(parser):\n dirgroup = parser.add_mutually_exclusive_group(required=True)\n dirgroup.add_argument(\n 'datadir', nargs='?', default=None,\n help='''Data directory, to be filled with channel 
subdirectories.''',\n )\n dirgroup.add_argument(\n '-o', '--out', dest='outdir', default=None,\n help='''Data directory, to be filled with channel subdirectories.''',\n )\n return parser\n\n\ndef _add_mainboard_group(parser):\n mbgroup = parser.add_argument_group(title='mainboard')\n mbgroup.add_argument(\n '-m', '--mainboard', dest='mboards', action=Extend,\n help='''Mainboard address. (default: first device found)''',\n )\n mbgroup.add_argument(\n '-d', '--subdevice', dest='subdevs', action=Extend,\n help='''USRP subdevice string. (default: \"A:A\")''',\n )\n mbgroup.add_argument(\n '--clock_rate', dest='clock_rates', action=Extend, type=noneorfloat,\n help='''Master clock rate for mainboard. Can be 'None'/'' to use\n device default or a value in Hz. (default: None)''',\n )\n mbgroup.add_argument(\n '--clock_source', dest='clock_sources', action=Extend, type=noneorstr,\n help='''Clock source (i.e. 10 MHz REF) for mainboard. Can be 'None'/''\n to use default (do not set if --nolock, otherwise 'external')\n or a string like 'external' or 'internal'. (default: '')''',\n )\n mbgroup.add_argument(\n '--time_source', dest='time_sources', action=Extend, type=noneorstr,\n help='''Time source (i.e. PPS) for mainboard. Can be 'None'/''\n to use default (do not set if --nosync, otherwise 'external')\n or a string like 'external' or 'internal'. (default: '')''',\n )\n return parser\n\n\ndef _add_receiver_group(parser):\n recgroup = parser.add_argument_group(title='receiver')\n recgroup.add_argument(\n '-r', '--samplerate', dest='samplerate', type=evalfloat,\n help='''Sample rate in Hz. (default: 1e6)''',\n )\n recgroup.add_argument(\n '-A', '--devargs', dest='dev_args', action=Extend,\n help='''Device arguments, e.g. \"master_clock_rate=30e6\".\n (default: 'recv_buff_size=100000000,num_recv_frames=512')''',\n )\n recgroup.add_argument(\n '-a', '--streamargs', dest='stream_args', action=Extend,\n help='''Stream arguments, e.g. \"peak=0.125,fullscale=1.0\".\n (default: '')''',\n )\n recgroup.add_argument(\n '-T', '--tuneargs', dest='tune_args', action=Extend,\n help='''Tune request arguments, e.g. \"mode_n=integer,int_n_step=100e3\".\n (default: '')''',\n )\n # kept for backward compatibility,\n # replaced by clock_source/time_source in 2.6\n recgroup.add_argument(\n '--sync_source', dest='sync_source',\n help=argparse.SUPPRESS,\n )\n recgroup.add_argument(\n '--nosync', dest='time_sync', action='store_false',\n help='''Skip syncing with reference time. (default: False)''',\n )\n recgroup.add_argument(\n '--nolock', dest='wait_for_lock', action='store_false',\n help='''Don't wait for reference clock to lock. (default: False)''',\n )\n recgroup.add_argument(\n '--stop_on_dropped', dest='stop_on_dropped', action='store_true',\n help='''Stop on dropped packet. (default: %(default)s)''',\n )\n recgroup.add_argument(\n '--realtime', dest='realtime', action='store_true',\n help='''Enable realtime scheduling if possible.\n (default: %(default)s)''',\n )\n recgroup.add_argument(\n '--notest', dest='test_settings', action='store_false',\n help='''Do not test USRP settings until experiment start.\n (default: False)''',\n )\n return parser\n\n\ndef _add_rchannel_group(parser):\n chgroup = parser.add_argument_group(title='receiver channel')\n chgroup.add_argument(\n '-f', '--centerfreq', dest='centerfreqs', action=Extend,\n type=evalfloat,\n help='''Center frequency in Hz. 
(default: 100e6)''',\n )\n chgroup.add_argument(\n '-F', '--lo_offset', dest='lo_offsets', action=Extend, type=evalfloat,\n help='''Frontend tuner offset from center frequency, in Hz.\n (default: 0)''',\n )\n chgroup.add_argument(\n '--lo_source', dest='lo_sources', action=Extend, type=noneorstr,\n help='''Local oscillator source. Typically 'None'/'' (do not set),\n 'internal' (e.g. LO1 for CH1, LO2 for CH2),\n 'companion' (e.g. LO2 for CH1, LO1 for CH2), or\n 'external' (neighboring board via connector).\n (default: '')''',\n )\n chgroup.add_argument(\n '--lo_export', dest='lo_exports', action=Extend, type=noneorbool,\n help='''Whether to export the LO's source to the external connector.\n Can be 'None'/'' to skip the channel, otherwise it can be\n 'True' or 'False' provided the LO source is set.\n (default: None)''',\n )\n chgroup.add_argument(\n '--dc_offset', dest='dc_offsets', action=Extend,\n type=noneorboolorcomplex,\n help='''DC offset correction to use. Can be 'None'/'' to keep device\n default, 'True'/'auto' to enable automatic correction, 'False'\n to disable automatic correction, or a complex value\n (e.g. \"1+1j\"). (default: False)''',\n )\n chgroup.add_argument(\n '--iq_balance', dest='iq_balances', action=Extend,\n type=noneorboolorcomplex,\n help='''IQ balance correction to use. Can be 'None'/'' to keep device\n default, 'True'/'auto' to enable automatic correction, 'False'\n to disable automatic correction, or a complex value\n (e.g. \"1+1j\"). (default: None)''',\n )\n chgroup.add_argument(\n '-g', '--gain', dest='gains', action=Extend, type=evalfloat,\n help='''Gain in dB. (default: 0)''',\n )\n chgroup.add_argument(\n '-b', '--bandwidth', dest='bandwidths', action=Extend, type=evalfloat,\n help='''Frontend bandwidth in Hz. (default: 0 == frontend default)''',\n )\n chgroup.add_argument(\n '-y', '--antenna', dest='antennas', action=Extend, type=noneorstr,\n help='''Name of antenna to select on the frontend.\n (default: frontend default))''',\n )\n return parser\n\n\ndef _add_ochannel_group(parser):\n chgroup = parser.add_argument_group(title='output channel')\n chgroup.add_argument(\n '+c', '-c', '--channel', dest='chs', action=Extend, type=intstrtuple,\n help='''Output channel specification, including names and mapping from\n receiver channels. Each output channel must be specified here\n and given a unique name. Specifications are given as a receiver\n channel number and name pair, e.g. \"0:ch0\". The number and\n colon are optional; if omitted, any unused receiver channels\n will be assigned to output channels in the supplied name order.\n (default: \"ch0\")''',\n )\n chgroup.add_argument(\n '+r', '--ch_samplerate', dest='ch_samplerates', action=Extend,\n type=noneorfloat,\n help='''Output channel sample rate in Hz. If 'None'/'', use the\n receiver sample rate. Filtering and resampling will be\n performed to achieve the desired rate (set filter specs with\n lpf_* options). Must be less than or equal to the receiver\n sample rate. (default: None)''',\n )\n # deprecated by ch_samplerate in 2.6\n # if used, all ch_samplerate arguments will be ignored\n chgroup.add_argument(\n '-i', '--dec', dest='decimations', action=Extend,\n type=evalint, help=argparse.SUPPRESS,\n )\n chgroup.add_argument(\n '+f', '--ch_centerfreq', dest='ch_centerfreqs', action=Extend,\n type=noneorboolorfloat,\n help='''Output channel center frequency in Hz. 
Can be 'True'/'auto' to\n use the receiver channel target frequency (correcting for\n actual tuner offset), 'False' to use the receiver channel\n frequency unchanged, or a float value. (default: False)''',\n )\n chgroup.add_argument(\n '+k', '--scale', dest='ch_scalings', action=Extend, type=evalfloat,\n help='''Scale output channel by this factor. (default: 1)''',\n )\n chgroup.add_argument(\n '+n', '--subchannels', dest='ch_nsubchannels', action=Extend,\n type=evalint,\n help='''Number of subchannels for channelizing the output. A polyphase\n filter bank will be applied after the otherwise specified\n resampling and frequency shifting to further decimate the\n output and divide it into this many equally-spaced channels.\n (default: 1)''',\n )\n chgroup.add_argument(\n '--lpf_cutoff', dest='ch_lpf_cutoffs', action=Extend, type=evalfloat,\n help='''Normalized low-pass filter cutoff frequency (start of\n transition band), where a value of 1 indicates half the\n *output* sampling rate. Value in Hz is therefore\n (cutoff * out_sample_rate / 2.0). (default: 0.9)''',\n )\n chgroup.add_argument(\n '--lpf_transition_width', dest='ch_lpf_transition_widths',\n action=Extend, type=evalfloat,\n help='''Normalized width (in frequency) of low-pass filter transition\n region from pass band to stop band, where a value of 1\n indicates half the *output* sampling rate. Value in Hz is\n therefore (transition_width * out_sample_rate / 2.0).\n (default: 0.2)''',\n )\n chgroup.add_argument(\n '--lpf_attenuation', dest='ch_lpf_attenuations', action=Extend,\n type=evalfloat,\n help='''Minimum attenuation of the low-pass filter stop band in dB.\n (default: 80)''',\n )\n chgroup.add_argument(\n '--lpf_pass_ripple', dest='ch_lpf_pass_ripples', action=Extend,\n type=noneorfloat,\n help='''Maximum ripple of the low-pass filter pass band in dB. If\n 'None', use the same value as `lpf_attenuation`.\n (default: None)''',\n )\n chgroup.add_argument(\n '+t', '--type', dest='ch_out_types', action=Extend, type=noneorstr,\n help='''Output channel data type to convert to ('scXX' for complex\n integer and 'fcXX' for complex float with XX bits). 
Use 'None'\n to skip conversion and use the USRP or filter output type.\n Conversion from float to integer will map a magnitude of 1.0\n (after any scaling) to the maximum integer value.\n (default: None)''',\n )\n return parser\n\n\ndef _add_drf_group(parser):\n drfgroup = parser.add_argument_group(title='digital_rf')\n drfgroup.add_argument(\n '-n', '--file_cadence_ms', dest='file_cadence_ms', type=evalint,\n help='''Number of milliseconds of data per file.\n (default: 1000)''',\n )\n drfgroup.add_argument(\n '-N', '--subdir_cadence_s', dest='subdir_cadence_s', type=evalint,\n help='''Number of seconds of data per subdirectory.\n (default: 3600)''',\n )\n drfgroup.add_argument(\n '--metadata', action=Extend, metavar='{KEY}={VALUE}',\n help='''Key, value metadata pairs to include with data.\n (default: \"\")''',\n )\n drfgroup.add_argument(\n '--uuid', dest='uuid',\n help='''Unique ID string for this data collection.\n (default: random)''',\n )\n return parser\n\n\ndef _add_time_group(parser):\n timegroup = parser.add_argument_group(title='time')\n timegroup.add_argument(\n '-s', '--starttime', dest='starttime',\n help='''Start time of the experiment as datetime (if in ISO8601 format:\n 2016-01-01T15:24:00Z) or Unix time (if float/int).\n (default: start ASAP)''',\n )\n timegroup.add_argument(\n '-e', '--endtime', dest='endtime',\n help='''End time of the experiment as datetime (if in ISO8601 format:\n 2016-01-01T16:24:00Z) or Unix time (if float/int).\n (default: wait for Ctrl-C)''',\n )\n timegroup.add_argument(\n '-l', '--duration', dest='duration', type=evalint,\n help='''Duration of experiment in seconds. When endtime is not given,\n end this long after start time. (default: wait for Ctrl-C)''',\n )\n timegroup.add_argument(\n '-p', '--cycle-length', dest='period', type=evalint,\n help='''Repeat time of experiment cycle. Align to start of next cycle\n if start time has passed. (default: 10)''',\n )\n return parser\n\n\ndef _build_thor_parser(Parser, *args):\n scriptname = os.path.basename(sys.argv[0])\n\n formatter = argparse.RawDescriptionHelpFormatter(scriptname)\n width = formatter._width\n\n title = 'THOR (The Haystack Observatory Recorder)'\n copyright = 'Copyright (c) 2017 Massachusetts Institute of Technology'\n shortdesc = 'Record data from synchronized USRPs in DigitalRF format.'\n desc = '\\n'.join((\n '*'*width,\n '*{0:^{1}}*'.format(title, width-2),\n '*{0:^{1}}*'.format(copyright, width-2),\n '*{0:^{1}}*'.format('', width-2),\n '*{0:^{1}}*'.format(shortdesc, width-2),\n '*'*width,\n ))\n\n usage = (\n '%(prog)s [-m MBOARD] [-d SUBDEV] [-c CH] [-y ANT] [-f FREQ]'\n ' [-F OFFSET] \\\\\\n'\n '{0:8}[-g GAIN] [-b BANDWIDTH] [-r RATE] [options] DIR\\n'.format('')\n )\n\n epi_pars = [\n '''\\\n Arguments in the \"mainboard\", \"receiver channel\", and \"output channel\"\n groups accept multiple values, allowing multiple mainboards and\n channels to be specified. Multiple arguments can be provided by\n repeating the argument flag, by passing a comma-separated list of\n values, or both. Within each argument group, parameters will be grouped\n in the order in which they are given to form the complete set of\n parameters for each mainboard/channel. 
For any argument with fewer\n values given than the number of mainboards/channels, its values will be\n extended by repeatedly cycling through the values given up to the\n needed number.\n ''',\n '''\\\n Arguments in other groups apply to all mainboards/channels (including\n the receiver sample rate).\n ''',\n '''\\\n Example usage:\n ''',\n ]\n epi_pars = [fill(dedent(s), width) for s in epi_pars]\n\n egtw = TextWrapper(\n width=(width - 2), break_long_words=False, break_on_hyphens=False,\n subsequent_indent=' ' * (len(scriptname) + 1),\n )\n egs = [\n '''\\\n {0} -m 192.168.20.2 -d \"A:A A:B\" -c h,v -f 95e6 -r 100e6/24\n /data/test\n ''',\n '''\\\n {0} -m 192.168.10.2 -d \"A:0\" -c ch1 -y \"TX/RX\" -f 20e6 -F 10e3 -g 20\n -b 0 -r 1e6 /data/test\n ''',\n ]\n egs = [' \\\\\\n'.join(egtw.wrap(dedent(s.format(scriptname)))) for s in egs]\n epi = '\\n' + '\\n\\n'.join(epi_pars + egs) + '\\n'\n\n # parse options\n parser = Parser(\n description=desc, usage=usage, epilog=epi, prefix_chars='-+',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n '--version', action='version',\n version='THOR 3.1, using digital_rf {0}'.format(drf.__version__),\n )\n parser.add_argument(\n '-q', '--quiet', dest='verbose', action='store_false',\n help='''Reduce text output to the screen. (default: False)''',\n )\n\n parser = _add_dir_group(parser)\n parser = _add_mainboard_group(parser)\n parser = _add_receiver_group(parser)\n parser = _add_rchannel_group(parser)\n parser = _add_ochannel_group(parser)\n parser = _add_drf_group(parser)\n parser = _add_time_group(parser)\n\n parser.set_defaults(func=_run_thor)\n\n return parser\n\n\ndef _run_thor(args):\n if args.datadir is None:\n args.datadir = args.outdir\n del args.outdir\n\n # handle deprecated decimation argument, converting it to sample rate\n if args.decimations is not None:\n if args.samplerate is None:\n args.samplerate = 1e6\n args.ch_samplerates = [args.samplerate / d for d in args.decimations]\n del args.decimations\n\n # handle deprecated sync_source argument, converting it to clock_sources\n # and time_sources\n if args.sync_source is not None:\n if args.clock_sources is None:\n args.clock_sources = [args.sync_source]\n if args.time_sources is None:\n args.time_sources = [args.sync_source]\n del args.sync_source\n\n # separate args.chs (num, name) tuples into args.channels and\n # args.channel_names\n if args.chs is not None:\n args.channels, args.channel_names = map(list, zip(*args.chs))\n del args.chs\n\n # remove redundant arguments in dev_args, stream_args, tune_args\n if args.dev_args is not None:\n try:\n dev_args_dict = dict([a.split('=') for a in args.dev_args])\n except ValueError:\n raise ValueError(\n 'Device arguments must be {KEY}={VALUE} pairs.'\n )\n args.dev_args = [\n '{0}={1}'.format(k, v) for k, v in dev_args_dict.items()\n ]\n if args.stream_args is not None:\n try:\n stream_args_dict = dict([a.split('=') for a in args.stream_args])\n except ValueError:\n raise ValueError(\n 'Stream arguments must be {KEY}={VALUE} pairs.'\n )\n args.stream_args = [\n '{0}={1}'.format(k, v) for k, v in stream_args_dict.items()\n ]\n if args.tune_args is not None:\n try:\n tune_args_dict = dict([a.split('=') for a in args.tune_args])\n except ValueError:\n raise ValueError(\n 'Tune request arguments must be {KEY}={VALUE} pairs.'\n )\n args.tune_args = [\n '{0}={1}'.format(k, v) for k, v in tune_args_dict.items()\n ]\n\n # convert metadata strings to a dictionary\n if args.metadata is not None:\n metadata_dict = 
{}\n for a in args.metadata:\n try:\n k, v = a.split('=')\n except ValueError:\n k = None\n v = a\n try:\n v = literal_eval(v)\n except ValueError:\n pass\n if k is None:\n metadata_dict.setdefault('metadata', []).append(v)\n else:\n metadata_dict[k] = v\n args.metadata = metadata_dict\n\n # ignore test_settings option if no starttime is set (starting right now)\n if args.starttime is None:\n args.test_settings = False\n\n options = {k: v for k, v in args._get_kwargs() if v is not None}\n runopts = {\n k: options.pop(k) for k in list(options.keys())\n if k in ('starttime', 'endtime', 'duration', 'period')\n }\n del options['func']\n thor = Thor(**options)\n thor.run(**runopts)\n\n\nif __name__ == '__main__':\n parser = _build_thor_parser(argparse.ArgumentParser)\n args = parser.parse_args()\n args.func(args)\n" ]
[ [ "numpy.abs", "numpy.fft.fft", "numpy.dtype", "numpy.ceil", "numpy.longdouble", "numpy.uint64", "numpy.fft.fftfreq", "numpy.zeros" ] ]
astrojhgu/rt1d
[ "cb49510ae9850d1491dcf9336e3994fb1b153438" ]
[ "tests/test_rt06_2.py" ]
[ "\"\"\"\n\ntest_rt06_2.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Wed Dec 26 18:37:48 2012\n\nDescription: This is Test problem #2 from the Radiative Transfer\nComparison Project (Iliev et al. 2006; RT06).\n\n\"\"\"\n\nimport rt1d\nimport matplotlib.pyplot as pl\n\nsim = rt1d.run.Simulation(problem_type=2)\nsim.run()\n\nanl = rt1d.analyze.Simulation(sim.checkpoints)\n\nfig1 = pl.figure(1); ax1 = fig1.add_subplot(111)\nfig2 = pl.figure(2); ax2 = fig2.add_subplot(111)\n\nanl.TemperatureProfile(t=[10, 30, 100], ax=ax1)\nanl.IonizationProfile(t=[10, 30, 100], ax=ax2)\n\n\n" ]
[ [ "matplotlib.pyplot.figure" ] ]
nagasudhirpulla/wrldc_metering_warehouse
[ "d56568783fb282c72977d537e51ed66f64dedf62" ]
[ "fict_master_data.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 12 16:41:28 2019\n\n@author: Nagasudhir\n\"\"\"\n\nimport pandas as pd\nimport pandas.io.sql as sqlio\nimport psycopg2\nfrom warehouse_db_config import getWarehouseDbConfigDict\n\nclass FictMasterData:\n '''\n returns meter master data dataframe with columns\n 'from_date', 'location_id', 'formula', 'loc_name', 'description'\n return None in case of problem\n '''\n masterDataDf = None\n \n def PushExcelToDb(self, filename = 'secret/fict_meter_master.xlsx', sheetName=0):\n self.parse(filename, sheetName)\n self.pushToDb()\n \n def parse(self, filename = 'secret/fict_meter_master.xlsx', sheetName=0):\n # read master data excel\n df = pd.read_excel(filename, sheet_name = sheetName) \n # check if the column names are ok\n reqColNames = ['from_time', 'location_id', 'formula', 'loc_name', 'description']\n if(df.columns.tolist()[0:5] != reqColNames):\n print('columns not as desired in master data excel file')\n return \n self.masterDataDf = df\n \n '''\n push data to database table, ovewrites existing data\n '''\n def pushToDb(self):\n warehouseConfigDict = getWarehouseDbConfigDict()\n conn = psycopg2.connect(host=warehouseConfigDict['db_host'], dbname=warehouseConfigDict['db_name'],\n user=warehouseConfigDict['db_username'], password=warehouseConfigDict['db_password'])\n cur = conn.cursor()\n # we will commit in multiples of 1 row\n rowIter = 0\n insIncr = 1\n numRows = self.masterDataDf.shape[0]\n while rowIter < numRows:\n # set iteration values\n iteratorEndVal = rowIter+insIncr\n if iteratorEndVal >= numRows:\n iteratorEndVal = numRows\n \n # Create row tuples\n dataInsertionTuples = []\n for insRowIter in range(rowIter, iteratorEndVal):\n dataRow = self.masterDataDf.iloc[insRowIter]\n \n dataInsertionTuple = (dataRow.from_time.strftime('%Y-%m-%d %H:%M:%S'), dataRow.location_id, dataRow.formula, dataRow.loc_name, dataRow.description)\n dataInsertionTuples.append(dataInsertionTuple)\n \n # prepare sql for insertion and execute\n dataText = ','.join(cur.mogrify('(%s,%s,%s,%s,%s)', row).decode(\"utf-8\") for row in dataInsertionTuples)\n cur.execute('INSERT INTO public.fict_master_data(\\\n \tfrom_time, location_id, loc_formula, loc_name, description)\\\n \tVALUES {0} on conflict (from_time, location_id) \\\n do update set loc_formula = excluded.loc_formula, loc_name = excluded.loc_name, description = excluded.description'.format(dataText))\n conn.commit()\n \n rowIter = iteratorEndVal\n \n # close cursor and connection\n cur.close()\n conn.close()\n print('Fict meter Master data overwrite done')\n \n '''\n Loads master data from db\n '''\n def loadFromDb(self):\n warehouseConfigDict = getWarehouseDbConfigDict()\n conn = psycopg2.connect(host=warehouseConfigDict['db_host'], dbname=warehouseConfigDict['db_name'],\n user=warehouseConfigDict['db_username'], password=warehouseConfigDict['db_password'])\n sql = \"select * from fict_master_data;\"\n df = sqlio.read_sql_query(sql, conn, index_col='id')\n conn = None\n self.masterDataDf = df\n \n '''\n Returns master data for a given date and meterId\n result is series like below\n from_date 2019-09-04 00:00:00\n location_id KO-01\n meter_id hgjhgjh\n ct_ratio 500\n pt_ratio 3636.36\n status M\n description 400kV SIDE OF GT1 AT KORBA STPS\n Name: 0, dtype: object\n '''\n def getLocMasterInfo(self, dateObj, locationId):\n df = self.masterDataDf\n filteredDf = df[(df.location_id==locationId) & (df.from_time<=dateObj)]\n locMasterInfo = filteredDf.loc[filteredDf.from_time.idxmax()]\n return 
locMasterInfo" ]
[ [ "pandas.read_excel", "pandas.io.sql.read_sql_query" ] ]
gsinuco/VariationalNN_Floquet
[ "46605e53c29801b9aaedfe9a61fe886239bc2112" ]
[ "src/variationalnn_floquet/TensorFlow_Floquet_old.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\n\nmodified from https://www.tensorflow.org/tutorials/customization/custom_training\n https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough\nCreated on Sat Aug 10 11:30:57 2019\n@author: German Sinuco\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport numpy as np\nimport math as m\nfrom scipy.stats import unitary_group\n\nclass Model(object):\n def __init__(self):\n # Initialize the spin value and number of floquet channels\n self.S = 2 # spin\n self.N = 0 # floquet manifolds\n self.dim = self.S*(2*self.N+1) # dimension of the extended floquet space\n aux_norm = tf.random.stateless_uniform([self.dim,self.dim],seed=[1,1],dtype=tf.float32,minval=0.0,maxval=1.0) \n aux_phase = tf.random.stateless_uniform([self.dim,self.dim],seed=[3,9],dtype=tf.float32,minval=-m.pi*1.0,maxval=m.pi*1.0) \n uf_ = tf.complex(aux_norm*tf.cos(aux_phase),aux_norm*tf.sin(aux_phase))\n s,u,v = tf.linalg.svd(uf_, full_matrices=True)\n uf_ = u\n self.UF_A = tf.Variable(aux_norm, dtype = tf.float32,trainable = True) # ext. micromotion operator amplitude\n self.UF_ph = tf.Variable(aux_phase,dtype = tf.float32,trainable = True) # ext. micromotion operator phase\n self.UF = tf.Variable(uf_,dtype = tf.complex64,trainable = False) # ext. micromotion operator\n self.H = tf.Variable(0.0*uf_,shape=(self.dim,self.dim),dtype = tf.complex64,trainable = False) # ext. Hamiltonian\n self.H_aux = tf.Variable(uf_,shape=(self.dim,self.dim),dtype = tf.complex64,trainable = False) # ext. aux operator\n \n coupling = tf.constant([[0,1],[-1, 0]],dtype = tf.complex64)\n H_qubit = tf.constant([[1,0],[0,-1]],dtype = tf.complex64)\n Identity = tf.constant([[1,0],[0, 1]],dtype = tf.complex64)\n omega = 0.9\n \n for i in range(0,2*self.N): \n i_r = self.S*(i) \n j_r = i_r + self.S \n \n i_c = i_r + self.S \n j_c = i_c + self.S \n self.H[i_r:j_r,i_c:j_c].assign(coupling) \n self.H[i_c:j_c,i_r:j_r].assign(coupling) \n \n for i in range(0,2*self.N+1): \n i_ = self.S*(i) \n j_ = i_ + self.S \n self.H[i_:j_,i_:j_].assign(H_qubit + (-self.N + i)*omega*Identity) #self.UF[0,0]\n \n self.trainable_variables = [self.UF_A,self.UF_ph]\n \n def getH(self):\n return self.H\n \n def __call__(self):\n uf_ = tf.complex(self.UF_A*tf.cos(self.UF_ph),self.UF_A*tf.sin(self.UF_ph))\n uf__ = tf.square(tf.abs(tf.transpose(tf.math.conj(uf_))@(self.H@uf_)))\n trace_ = tf.linalg.trace(uf__)/(self.dim*self.dim) \n residual = tf.sqrt(tf.reduce_mean(uf__) - trace_) \n return residual\n \n\n\ndef loss(predicted_y):\n return tf.reduce_mean(tf.square(predicted_y))\n\n\ndef train(model, learning_rate):\n with tf.GradientTape() as t:\n current_loss = loss(model())\n gradients = t.gradient(current_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n \n \ndef loss_():\n loss_var = tf.Variable(1.0, dtype = tf.float32,trainable = True) \n return loss_var\n \nmodel = Model()\noptimizer = tf.keras.optimizers.SGD(learning_rate=0.5)\nlet = loss_\n#callable(let)\n\nepochs = range(0,500)\nfor epoch in epochs:\n optimizer.minimize(model,model.trainable_variables)\n\nuf_ = tf.complex(model.UF_A*tf.cos(model.UF_ph),model.UF_A*tf.sin(model.UF_ph))\nH = tf.transpose(tf.math.conj(uf_))@model.H@uf_\nprint(abs(H))\n\n#a=model.residual_fun()\n#optimizer.get_gradients(model.residual_fun(),model.trainable_variables)\n \n#optimizer.minimize(model(),model.trainable_variables)\n\n# Collect the history of W-values and 
b-values to plot later\nHs, bs = [], []\nepochs = range(0,500)\nfor epoch in epochs:\n #Hs.append(model.UF.numpy())\n #current_loss = loss(model())\n train(model, learning_rate=0.5)\n #print(current_loss.numpy())\n\nuf_ = tf.complex(model.UF_A*tf.cos(model.UF_ph),model.UF_A*tf.sin(model.UF_ph))\nH = tf.transpose(tf.math.conj(uf_))@model.H@uf_\nprint(abs(H))\n\n#tf.keras.estimator.model_to_estimator()" ]
[ [ "tensorflow.sin", "tensorflow.constant", "tensorflow.cos", "tensorflow.linalg.svd", "tensorflow.Variable", "tensorflow.reduce_mean", "tensorflow.GradientTape", "tensorflow.math.conj", "tensorflow.square", "tensorflow.random.stateless_uniform", "tensorflow.linalg.trace", "tensorflow.keras.optimizers.SGD" ] ]
Or-Tal/fairseq
[ "2be0d74e0034f95a32f24afb381e0e275957a0e4", "2be0d74e0034f95a32f24afb381e0e275957a0e4" ]
[ "lese/dsp.py", "lese/audio.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n# author: adefossez\n\nimport numpy as np\nimport torch\nfrom torch.nn import functional as F\n\n\ndef hz_to_mel(f):\n return 2595 * np.log10(1 + f / 700)\n\n\ndef mel_to_hz(m):\n return 700 * (10**(m / 2595) - 1)\n\n\ndef mel_frequencies(n_mels, fmin, fmax):\n low = hz_to_mel(fmin)\n high = hz_to_mel(fmax)\n mels = np.linspace(low, high, n_mels)\n return mel_to_hz(mels)\n\n\nclass LowPassFilters(torch.nn.Module):\n \"\"\"\n Bank of low pass filters.\n\n Args:\n cutoffs (list[float]): list of cutoff frequencies, in [0, 1] expressed as `f/f_s` where\n f_s is the samplerate.\n width (int): width of the filters (i.e. kernel_size=2 * width + 1).\n Default to `2 / min(cutoffs)`. Longer filters will have better attenuation\n but more side effects.\n Shape:\n - Input: `(*, T)`\n - Output: `(F, *, T` with `F` the len of `cutoffs`.\n \"\"\"\n\n def __init__(self, cutoffs: list, width: int = None):\n super().__init__()\n self.cutoffs = cutoffs\n if width is None:\n width = int(2 / min(cutoffs))\n self.width = width\n window = torch.hamming_window(2 * width + 1, periodic=False)\n t = np.arange(-width, width + 1, dtype=np.float32)\n filters = []\n for cutoff in cutoffs:\n sinc = torch.from_numpy(np.sinc(2 * cutoff * t))\n filters.append(2 * cutoff * sinc * window)\n self.register_buffer(\"filters\", torch.stack(filters).unsqueeze(1))\n\n def forward(self, input):\n *others, t = input.shape\n input = input.view(-1, 1, t)\n out = F.conv1d(input, self.filters, padding=self.width)\n return out.permute(1, 0, 2).reshape(-1, *others, t)\n\n def __repr__(self):\n return \"LossPassFilters(width={},cutoffs={})\".format(self.width, self.cutoffs)\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n# author: adefossez\n\nfrom collections import namedtuple\nimport json\nfrom pathlib import Path\nimport math\nimport os\nimport sys\n\nimport torchaudio\nfrom torch.nn import functional as F\n\n\nInfo = namedtuple(\"Info\", [\"length\", \"sample_rate\", \"channels\"])\n\n\ndef get_info(path):\n info = torchaudio.info(path)\n if hasattr(info, 'num_frames'):\n # new version of torchaudio\n return Info(info.num_frames, info.sample_rate, info.num_channels)\n else:\n siginfo = info[0]\n return Info(siginfo.length // siginfo.channels, siginfo.rate, siginfo.channels)\n\n\ndef find_audio_files(path, exts=[\".wav\"], progress=True):\n audio_files = []\n for root, folders, files in os.walk(path, followlinks=True):\n for file in files:\n file = Path(root) / file\n if file.suffix.lower() in exts:\n audio_files.append(str(file.resolve()))\n meta = []\n for idx, file in enumerate(audio_files):\n info = get_info(file)\n meta.append((file, info.length))\n if progress:\n print(format((1 + idx) / len(audio_files), \" 3.1%\"), end='\\r', file=sys.stderr)\n meta.sort()\n return meta\n\n\nclass Audioset:\n def __init__(self, files=None, length=None, stride=None,\n pad=True, with_path=False, sample_rate=None):\n \"\"\"\n files should be a list [(file, length)]\n \"\"\"\n self.files = files\n self.num_examples = []\n self.length = length\n self.stride = stride or length\n self.with_path = with_path\n self.sample_rate = sample_rate\n for file, file_length in self.files:\n if length is None:\n examples = 1\n elif file_length < length:\n examples = 1 if pad else 0\n elif pad:\n examples = int(math.ceil((file_length - self.length) / self.stride) + 1)\n else:\n examples = (file_length - self.length) // self.stride + 1\n self.num_examples.append(examples)\n\n def __len__(self):\n return sum(self.num_examples)\n\n def __getitem__(self, index):\n for (file, _), examples in zip(self.files, self.num_examples):\n if index >= examples:\n index -= examples\n continue\n num_frames = 0\n offset = 0\n if self.length is not None:\n offset = self.stride * index\n num_frames = self.length\n if torchaudio.get_audio_backend() in ['soundfile', 'sox_io']:\n out, sr = torchaudio.load(str(file),\n frame_offset=offset,\n num_frames=num_frames or -1)\n else:\n out, sr = torchaudio.load(str(file), offset=offset, num_frames=num_frames)\n if self.sample_rate is not None:\n if sr != self.sample_rate:\n raise RuntimeError(f\"Expected {file} to have sample rate of \"\n f\"{self.sample_rate}, but got {sr}\")\n if num_frames:\n out = F.pad(out, (0, num_frames - out.shape[-1]))\n if self.with_path:\n return out, file\n else:\n return out\n\n\nif __name__ == \"__main__\":\n meta = []\n for path in sys.argv[1:]:\n meta += find_audio_files(path)\n json.dump(meta, sys.stdout, indent=4)\n" ]
[ [ "numpy.sinc", "torch.hamming_window", "numpy.linspace", "numpy.arange", "torch.nn.functional.conv1d", "numpy.log10", "torch.stack" ], [ "torch.nn.functional.pad" ] ]
raysr/Deep-Reinforcement-Learning-Notebooks
[ "a7e988205bb21a2bf6972cf51bbd46d624ecf9ef" ]
[ "test-yourself.py" ]
[ "# INITIALIZATION: libraries, parameters, network...\n\nfrom keras.models import Sequential # One layer after the other\nfrom keras.layers import Dense, Flatten # Dense layers are fully connected layers, Flatten layers flatten out multidimensional inputs\nfrom collections import deque # For storing moves \nimport os\nimport numpy as np\nimport gym # To train our network\nenv = gym.make('MountainCar-v0') # Choose game (any in the gym should work)\n\nimport random # For sampling batches from the observations\n\n\n# Create network. Input is two consecutive game states, output is Q-values of the possible moves.\nmodel = Sequential()\nmodel.add(Dense(20, input_shape=(2,) + env.observation_space.shape, init='uniform', activation='relu'))\nmodel.add(Flatten()) # Flatten input so as to have no problems with processing\nmodel.add(Dense(18, init='uniform', activation='relu'))\nmodel.add(Dense(10, init='uniform', activation='relu'))\nmodel.add(Dense(env.action_space.n, init='uniform', activation='linear')) # Same number of outputs as possible actions\n\nmodel.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n\n# Parameters\nD = deque() # Register where the actions will be stored\n\nobservetime = 1000000 # Number of timesteps we will be acting on the game and observing results\nepsilon = 0.7 # Probability of doing a random move\ngamma = 0.9 # Discounted future reward. How much we care about steps further in time\nmb_size = 50000 # Learning minibatch size\n\n# FIRST STEP: Knowing what each action does (Observing)\n\nobservation = env.reset() # Game begins\nobs = np.expand_dims(observation, axis=0) # (Formatting issues) Making the observation the first element of a batch of inputs \nstate = np.stack((obs, obs), axis=1)\ndone = False\nfor t in range(observetime):\n os.system(\"clear\")\n print(\"Observation (\"+str(t)+\"/\"+str(observetime)+\")\")\n if np.random.rand() <= epsilon:\n action = np.random.randint(0, env.action_space.n, size=1)[0]\n else:\n Q = model.predict(state) # Q-values predictions\n action = np.argmax(Q) # Move with highest Q-value is the chosen one\n observation_new, reward, done, info = env.step(action) # See state of the game, reward... 
after performing the action\n obs_new = np.expand_dims(observation_new, axis=0) # (Formatting issues)\n state_new = np.append(np.expand_dims(obs_new, axis=0), state[:, :1, :], axis=1) # Update the input with the new state of the game\n D.append((state, action, reward, state_new, done)) # 'Remember' action and consequence\n state = state_new # Update state\n if done:\n env.reset() # Restart game if it's finished\n obs = np.expand_dims(observation, axis=0) # (Formatting issues) Making the observation the first element of a batch of inputs \n state = np.stack((obs, obs), axis=1)\nprint('Observing Finished')\n\n\n\n\n\n\n\nminibatch = random.sample(D, mb_size) # Sample some moves\n\ninputs_shape = (mb_size,) + state.shape[1:]\ninputs = np.zeros(inputs_shape)\ntargets = np.zeros((mb_size, env.action_space.n))\n\nfor i in range(0, mb_size):\n os.system(\"clear\")\n print(\"Batch (\"+str(i)+\"/\"+str(mb_size)+\")\")\n state = minibatch[i][0]\n action = minibatch[i][1]\n reward = minibatch[i][2]\n state_new = minibatch[i][3]\n done = minibatch[i][4]\n \n# Build Bellman equation for the Q function\n inputs[i:i+1] = np.expand_dims(state, axis=0)\n targets[i] = model.predict(state)\n Q_sa = model.predict(state_new)\n \n if done:\n targets[i, action] = reward\n else:\n targets[i, action] = reward + gamma * np.max(Q_sa)\n\n# Train network to output the Q function\n model.train_on_batch(inputs, targets)\nprint('Learning Finished')\n\n\n\n\n\nobservation = env.reset()\nobs = np.expand_dims(observation, axis=0)\nstate = np.stack((obs, obs), axis=1)\ndone = False\ntot_reward = 0.0\nwhile not done:\n env.render() # Uncomment to see game running\n Q = model.predict(state) \n action = np.argmax(Q) \n observation, reward, done, info = env.step(action)\n obs = np.expand_dims(observation, axis=0)\n state = np.append(np.expand_dims(obs, axis=0), state[:, :1, :], axis=1) \n tot_reward += reward\nprint('Game ended! Total reward: {}'.format(reward))\n\n# serialize model to JSON\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model_weights.h5\")\nprint(\"Saved model to disk\")\nprint(\"End of the game\")" ]
[ [ "numpy.expand_dims", "numpy.stack", "numpy.max", "numpy.argmax", "numpy.random.rand", "numpy.zeros", "numpy.random.randint" ] ]
AK391/models
[ "6ab1414db8b2895174c921b7c42ee8371e93d882" ]
[ "workflow_scripts/generate_onnx_hub_manifest.py" ]
[ "import hashlib\r\nimport json\r\nimport os\r\nimport re\r\nimport bs4\r\nimport markdown\r\nimport pandas as pd\r\nimport typepy\r\nfrom os.path import join, split\r\nimport onnxruntime as ort\r\nfrom onnxruntime.capi.onnxruntime_pybind11_state import NotImplemented\r\nimport onnx\r\nfrom onnx import shape_inference\r\n\r\n\r\n# Acknowledgments to pytablereader codebase for this function\r\ndef parse_html(table):\r\n headers = []\r\n data_matrix = []\r\n rows = table.find_all(\"tr\")\r\n re_table_val = re.compile(\"td|th\")\r\n for row in rows:\r\n td_list = row.find_all(\"td\")\r\n if typepy.is_empty_sequence(td_list):\r\n if typepy.is_not_empty_sequence(headers):\r\n continue\r\n th_list = row.find_all(\"th\")\r\n if typepy.is_empty_sequence(th_list):\r\n continue\r\n headers = [row.text.strip() for row in th_list]\r\n continue\r\n data_matrix.append(list(row.find_all(re_table_val)))\r\n\r\n if typepy.is_empty_sequence(data_matrix):\r\n raise ValueError(\"data matrix is empty\")\r\n\r\n return pd.DataFrame(data_matrix, columns=headers)\r\n\r\n\r\ndef parse_readme(filename):\r\n with open(filename, \"r\") as f:\r\n parsed = markdown.markdown(f.read(), extensions=[\"markdown.extensions.tables\"])\r\n soup = bs4.BeautifulSoup(parsed, \"html.parser\")\r\n return [parse_html(table) for table in soup.find_all(\"table\")]\r\n\r\n\r\ntop_level_readme = join(\"..\", \"README.md\")\r\ntop_level_tables = parse_readme(top_level_readme)\r\nmarkdown_files = set()\r\nfor top_level_table in top_level_tables:\r\n for i, row in top_level_table.iterrows():\r\n if \"Model Class\" in row:\r\n try:\r\n markdown_files.add(join(\r\n \"..\", row[\"Model Class\"].contents[0].contents[0].attrs['href'], \"README.md\"))\r\n except AttributeError:\r\n print(\"{} has no link to implementation\".format(row[\"Model Class\"].contents[0]))\r\n# Sort for reproducibility\r\nmarkdown_files = sorted(list(markdown_files))\r\n\r\nall_tables = []\r\nfor markdown_file in markdown_files:\r\n with open(markdown_file, \"r\") as f:\r\n for parsed in parse_readme(markdown_file):\r\n parsed = parsed.rename(columns={\"Opset Version\": \"Opset version\"})\r\n if all(col in parsed.columns.values for col in [\"Model\", \"Download\", \"Opset version\", \"ONNX version\"]):\r\n parsed[\"source_file\"] = markdown_file\r\n all_tables.append(parsed)\r\n else:\r\n print(\"Unrecognized table columns in file {}: {}\".format(markdown_file, parsed.columns.values))\r\n\r\ndf = pd.concat(all_tables, axis=0)\r\nnormalize_name = {\r\n \"Download\": \"model_path\",\r\n \"Download (with sample test data)\": \"model_with_data_path\",\r\n}\r\n\r\ntop_level_fields = [\"model\", \"model_path\", \"opset_version\", \"onnx_version\"]\r\n\r\n\r\ndef prep_name(col):\r\n if col in normalize_name:\r\n col = normalize_name[col]\r\n col = col.rstrip()\r\n prepped_col = col.replace(\" \", \"_\").lower()\r\n if prepped_col in top_level_fields:\r\n return prepped_col\r\n else:\r\n return col\r\n\r\n\r\nrenamed = df.rename(columns={col: prep_name(col) for col in df.columns.values})\r\nmetadata_fields = [f for f in renamed.columns.values if f not in top_level_fields]\r\n\r\n\r\ndef get_file_info(row, field):\r\n source_dir = split(row[\"source_file\"])[0]\r\n model_file = row[field].contents[0].attrs[\"href\"]\r\n ## So that model relative path is consistent across OS\r\n rel_path = \"/\".join(join(source_dir, model_file).split(os.sep)[1:])\r\n with open(join(\"..\", rel_path), \"rb\") as f:\r\n bytes = f.read()\r\n sha256 = hashlib.sha256(bytes).hexdigest()\r\n return 
{\r\n field: rel_path,\r\n field.replace(\"_path\", \"\") + \"_sha\": sha256,\r\n field.replace(\"_path\", \"\") + \"_bytes\": len(bytes),\r\n }\r\n\r\n\r\ndef get_model_tags(row):\r\n source_dir = split(row[\"source_file\"])[0]\r\n raw_tags = source_dir.split(\"/\")[1:]\r\n return [tag.replace(\"_\", \" \") for tag in raw_tags]\r\n\r\n\r\ndef get_model_ports(source_file, metadata, model_name):\r\n model_path = join(\"..\", source_file)\r\n try:\r\n # Hide graph warnings. Severity 3 means error and above.\r\n ort.set_default_logger_severity(3)\r\n # Start from ORT 1.10, ORT requires explicitly setting the providers parameter\r\n # if you want to use execution providers\r\n # other than the default CPU provider (as opposed to the previous behavior of\r\n # providers getting set/registered by default\r\n # based on the build flags) when instantiating InferenceSession.\r\n # For example, if NVIDIA GPU is available and ORT Python package is built with CUDA, then call API as following:\r\n # ort.InferenceSession(path/to/model, providers=['CUDAExecutionProvider'])\r\n session = ort.InferenceSession(model_path)\r\n inputs = session.get_inputs()\r\n outputs = session.get_outputs()\r\n io_ports = {\r\n \"inputs\": [{\"name\": input.name, \"shape\": input.shape, \"type\": input.type} for input in inputs],\r\n \"outputs\": [{\"name\": output.name, \"shape\": output.shape, \"type\": output.type} for output in outputs],\r\n }\r\n\r\n extra_ports = None\r\n if \"classification\" in metadata[\"tags\"]:\r\n inferred_model = shape_inference.infer_shapes(onnx.load(model_path))\r\n nodes = list(inferred_model.graph.value_info)\r\n if model_name in feature_tensor_names:\r\n node_name = feature_tensor_names[model_name]\r\n node = [n for n in nodes if n.name == node_name][0]\r\n shape = [d.dim_value for d in list(node.type.tensor_type.shape.dim)]\r\n extra_ports = {\"features\": [\r\n {\"name\": node.name, \"shape\": shape}\r\n ]}\r\n\r\n return io_ports, extra_ports\r\n\r\n except NotImplemented:\r\n print(\r\n 'Failed to load model from {}. 
Run `git lfs pull --include=\"{}\" --exclude=\"\"` '\r\n 'to download the model payload first.'.format(\r\n model_path, source_file\r\n )\r\n )\r\n return None, None\r\n\r\n\r\nfeature_tensor_names = {\r\n 'AlexNet': 'fc7_1',\r\n 'CaffeNet': 'fc7_1',\r\n 'DenseNet-121': 'pool5_1',\r\n 'EfficientNet-Lite4': 'efficientnet-lite4/model/head/AvgPool:0',\r\n 'GoogleNet': 'pool5/7x7_s1_2',\r\n 'Inception-1': 'pool5/7x7_s1_2',\r\n 'Inception-2': 'pool5/7x7_s1_1',\r\n 'MobileNet v2-1.0': '464',\r\n 'R-CNN ILSVRC13': 'fc7_1',\r\n 'ResNet18': 'resnetv15_pool1_fwd',\r\n 'ResNet34': 'resnetv16_pool1_fwd',\r\n 'ResNet50': 'resnetv17_pool1_fwd',\r\n 'ResNet101': 'resnetv18_pool1_fwd',\r\n 'ResNet152': 'resnetv19_pool1_fwd',\r\n 'ResNet50_fp32': 'resnetv17_pool1_fwd',\r\n 'ResNet50_int8': 'flatten_473_quantized',\r\n 'ResNet50-caffe2': 'gpu_0/pool5_1',\r\n 'ResNet18-v2': 'resnetv22_pool1_fwd',\r\n 'ResNet34-v2': 'resnetv23_pool1_fwd',\r\n 'ResNet50-v2': 'resnetv24_pool1_fwd',\r\n 'ResNet101-v2': 'resnetv25_pool1_fwd',\r\n 'ResNet152-v2': 'resnetv27_pool1_fwd',\r\n 'ShuffleNet-v1': 'gpu_0/final_avg_1',\r\n 'ShuffleNet-v2': '611',\r\n 'ShuffleNet-v2-fp32': '611',\r\n 'ShuffleNet-v2-int8': '611',\r\n 'SqueezeNet 1.1': 'squeezenet0_pool3_fwd',\r\n 'SqueezeNet 1.0': 'pool10_1',\r\n 'VGG 16': \"flatten_70\",\r\n 'VGG 16-bn': \"flatten_135\",\r\n 'VGG 19': \"flatten_82\",\r\n 'VGG 19-bn': \"flatten_162\",\r\n 'VGG 16-int8': \"flatten_70_quantized\",\r\n 'VGG 19-caffe2': \"fc7_3\",\r\n 'ZFNet-512': 'gpu_0/fc7_2'\r\n}\r\n\r\noutput = []\r\nfor i, row in renamed.iterrows():\r\n if len(row[\"model\"].contents) > 0 and len(row[\"model_path\"].contents) > 0:\r\n model_name = row[\"model\"].contents[0]\r\n model_info = get_file_info(row, \"model_path\")\r\n model_path = model_info.pop(\"model_path\")\r\n metadata = model_info\r\n metadata[\"tags\"] = get_model_tags(row)\r\n io_ports, extra_ports = get_model_ports(model_path, metadata, model_name)\r\n if io_ports is not None:\r\n metadata[\"io_ports\"] = io_ports\r\n if extra_ports is not None:\r\n metadata[\"extra_ports\"] = extra_ports\r\n\r\n try:\r\n for k, v in get_file_info(row, \"model_with_data_path\").items():\r\n metadata[k] = v\r\n except (AttributeError, FileNotFoundError) as e:\r\n print(\"no model_with_data in file {}\".format(row[\"source_file\"]))\r\n\r\n try:\r\n opset = int(row[\"opset_version\"].contents[0])\r\n except ValueError:\r\n print(\"malformed opset {} in {}\".format(row[\"opset_version\"].contents[0], row[\"source_file\"]))\r\n continue\r\n\r\n output.append(\r\n {\r\n \"model\": model_name,\r\n \"model_path\": model_path,\r\n \"onnx_version\": row[\"onnx_version\"].contents[0],\r\n \"opset_version\": int(row[\"opset_version\"].contents[0]),\r\n \"metadata\": metadata\r\n }\r\n )\r\n else:\r\n print(\"Missing model in {}\".format(row[\"source_file\"]))\r\n\r\nwith open(join(\"..\", \"ONNX_HUB_MANIFEST.json\"), \"w+\") as f:\r\n print(\"Found {} models\".format(len(output)))\r\n json.dump(output, f, indent=4)\r\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
ananyaganesh/ftmp
[ "9ee23939f0c1da854846b8ce1a9abe4e9b377031" ]
[ "train.py" ]
[ "import time\nimport os\nimport pyhocon\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom models import *\nfrom utils import *\nfrom nn_blocks import *\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom transformers import *\nimport argparse\nimport random\nimport numpy as np\n\ndevice = torch.device(\"cuda\")\n\ndef train(experiment):\n config = initialize_env(experiment)\n tokenizer = AutoTokenizer.from_pretrained(\"TODBERT/TOD-BERT-JNT-V1\")\n tod_bert = AutoModel.from_pretrained(\"TODBERT/TOD-BERT-JNT-V1\")\n if config['use_tod']:\n XD_train, YD_train, XU_train, TC_train, YU_train, turn_train = create_todbert_traindata(config=config, tokenizer=tokenizer, prefix='train')\n XD_valid, YD_valid, XU_valid, TC_valid, YU_valid, turn_valid = create_todbert_traindata(config=config, tokenizer=tokenizer, prefix='dev')\n else:\n XD_train, YD_train, XU_train, YU_train, turn_train = create_traindata(config=config, prefix='train')\n XD_valid, YD_valid, XU_valid, YU_valid, turn_valid = create_traindata(config=config, prefix='dev')\n print('Finish create train data...')\n\n if os.path.exists(os.path.join(config['log_root'], 'da_vocab.dict')):\n da_vocab = da_Vocab(config, create_vocab=False)\n utt_vocab = utt_Vocab(config, create_vocab=False)\n else:\n da_vocab = da_Vocab(config, das=[token for conv in XD_train + XD_valid + YD_train + YD_valid for token in conv])\n utt_vocab = utt_Vocab(config,\n sentences=[sentence for conv in XU_train + XU_valid + YU_train + YU_valid for sentence in\n conv])\n da_vocab.save()\n utt_vocab.save()\n print('Utterance vocab.: {}'.format(len(utt_vocab.word2id)))\n print('Dialog Act vocab.: {}'.format(len(da_vocab.word2id)))\n\n # Tokenize\n XD_train, YD_train = da_vocab.tokenize(XD_train), da_vocab.tokenize(YD_train)\n XD_valid, YD_valid = da_vocab.tokenize(XD_valid), da_vocab.tokenize(YD_valid)\n XU_train, YU_train = utt_vocab.tokenize(XU_train), utt_vocab.tokenize(YU_train)\n XU_valid, YU_valid = utt_vocab.tokenize(XU_valid), utt_vocab.tokenize(YU_valid)\n assert len(XD_train) == len(YD_train), 'Unexpect content in train data'\n assert len(XD_valid) == len(YD_valid), 'Unexpect content in valid data'\n lr = config['lr']\n batch_size = config['BATCH_SIZE']\n\n predictor = DApredictModel(utt_vocab=utt_vocab, da_vocab=da_vocab, tod_bert=tod_bert, config=config)\n predictor.to(device)\n model_opt = optim.Adam(predictor.parameters(), lr=lr)\n start = time.time()\n _valid_loss = None\n _train_loss = None\n total_loss = 0\n early_stop = 0\n for e in range(config['EPOCH']):\n tmp_time = time.time()\n print('Epoch {} start'.format(e+1))\n indexes = [i for i in range(len(XD_train))]\n random.shuffle(indexes)\n k = 0\n predictor.train()\n while k < len(indexes):\n # initialize\n step_size = min(batch_size, len(indexes) - k)\n batch_idx = indexes[k: k + step_size]\n model_opt.zero_grad()\n # create batch data\n #print('\\rConversation {}/{} training...'.format(k + step_size, len(XD_train)), end='')\n XU_seq = [XU_train[seq_idx] for seq_idx in batch_idx]\n XD_seq = [XD_train[seq_idx] for seq_idx in batch_idx]\n YD_seq = [YD_train[seq_idx] for seq_idx in batch_idx]\n turn_seq = [turn_train[seq_idx] for seq_idx in batch_idx]\n max_conv_len = max(len(s) for s in XU_seq)\n XU_tensor = []\n XD_tensor = []\n turn_tensor = []\n\n if config['use_tod']:\n TC_seq = [TC_train[seq_idx] for seq_idx in batch_idx]\n max_context_len = max(len(TC) for TC in TC_seq)\n for ci in range(len(TC_seq)):\n TC_seq[ci] = TC_seq[ci] + [0] * (max_context_len - 
len(TC_seq[ci]))\n TC_tensor = torch.tensor(TC_seq).to(device)\n else:\n TC_valid = None\n TC_tensor = None\n\n for i in range(0, max_conv_len):\n max_xseq_len = max(len(XU[i]) + 1 for XU in XU_seq)\n # utterance padding\n for ci in range(len(XU_seq)):\n XU_seq[ci][i] = XU_seq[ci][i] + [utt_vocab.word2id['<PAD>']] * (max_xseq_len - len(XU_seq[ci][i]))\n XU_tensor.append(torch.tensor([XU[i] for XU in XU_seq]).to(device))\n XD_tensor.append(torch.tensor([[XD[i]] for XD in XD_seq]).to(device))\n turn_tensor.append(torch.tensor([[t[i]] for t in turn_seq]).to(device))\n if config['DApred']['predict']:\n XD_tensor = XD_tensor[:-1]\n YD_tensor = torch.tensor([YD[-2] for YD in YD_seq]).to(device)\n else:\n YD_tensor = torch.tensor([YD[-1] for YD in YD_seq]).to(device)\n loss, preds = predictor.forward(X_da=XD_tensor, Y_da=YD_tensor, X_utt=XU_tensor, TC=TC_tensor, turn=turn_tensor, step_size=step_size)\n model_opt.step()\n total_loss += loss\n k += step_size\n print()\n\n valid_loss, valid_acc = validation(XD_valid=XD_valid, XU_valid=XU_valid, YD_valid=YD_valid, TC_valid=TC_valid, turn_valid=turn_valid, model=predictor, utt_vocab=utt_vocab, config=config)\n\n def save_model(filename):\n torch.save(predictor.state_dict(), os.path.join(config['log_dir'], 'da_pred_state{}.model'.format(filename)))\n\n if _valid_loss is None:\n save_model('validbest')\n _valid_loss = valid_loss\n else:\n if _valid_loss > valid_loss:\n save_model('validbest')\n _valid_loss = valid_loss\n print('valid loss update, save model')\n\n if _train_loss is None:\n save_model('trainbest')\n _train_loss = total_loss\n else:\n if _train_loss > total_loss:\n save_model('trainbest')\n _train_loss = total_loss\n early_stop = 0\n print('train loss update, save model')\n else:\n early_stop += 1\n print('early stopping count | {}/{}'.format(early_stop, config['EARLY_STOP']))\n if early_stop >= config['EARLY_STOP']:\n break\n if (e + 1) % config['LOGGING_FREQ'] == 0:\n print_loss_avg = total_loss / config['LOGGING_FREQ']\n total_loss = 0\n print('steps %d\\tloss %.4f\\tvalid loss %.4f\\tvalid acc %.4f | exec time %.4f' % (e + 1, print_loss_avg, valid_loss, valid_acc, time.time() - tmp_time))\n\n if (e + 1) % config['SAVE_MODEL'] == 0:\n print('saving model')\n save_model(e+1)\n print()\n print('Finish training | exec time: %.4f [sec]' % (time.time() - start))\n\n\ndef validation(XD_valid, XU_valid, YD_valid, TC_valid, turn_valid, model, utt_vocab, config):\n model.eval()\n total_loss = 0\n k = 0\n batch_size = config['BATCH_SIZE']\n indexes = [i for i in range(len(XU_valid))]\n acc = []\n predicted = []\n gold = []\n while k < len(indexes):\n step_size = min(batch_size, len(indexes) - k)\n batch_idx = indexes[k: k + step_size]\n XU_seq = [XU_valid[seq_idx] for seq_idx in batch_idx]\n XD_seq = [XD_valid[seq_idx] for seq_idx in batch_idx]\n YD_seq = [YD_valid[seq_idx] for seq_idx in batch_idx]\n turn_seq = [turn_valid[seq_idx] for seq_idx in batch_idx]\n max_conv_len = max(len(s) for s in XU_seq)\n XU_tensor = []\n XD_tensor = []\n turn_tensor = []\n\n if config['use_tod']:\n TC_seq = [TC_valid[seq_idx] for seq_idx in batch_idx]\n max_context_len = max(len(TC) for TC in TC_seq)\n for ci in range(len(TC_seq)):\n TC_seq[ci] = TC_seq[ci] + [0] * (max_context_len - len(TC_seq[ci]))\n TC_tensor = torch.tensor(TC_seq).to(device)\n else:\n TC_tensor = None\n\n for i in range(0, max_conv_len):\n max_xseq_len = max(len(XU[i]) + 1 for XU in XU_seq)\n for ci in range(len(XU_seq)):\n XU_seq[ci][i] = XU_seq[ci][i] + [utt_vocab.word2id['<PAD>']] * 
(max_xseq_len - len(XU_seq[ci][i]))\n XU_tensor.append(torch.tensor([x[i] for x in XU_seq]).to(device))\n XD_tensor.append(torch.tensor([[x[i]] for x in XD_seq]).to(device))\n turn_tensor.append(torch.tensor([[t[i]] for t in turn_seq]).to(device))\n if config['DApred']['predict']:\n XD_tensor = XD_tensor[:-1]\n YD_tensor = torch.tensor([YD[-2] for YD in YD_seq]).to(device)\n else:\n YD_tensor = torch.tensor([YD[-1] for YD in YD_seq]).to(device)\n loss, preds = model(X_da=XD_tensor, Y_da=YD_tensor, X_utt=XU_tensor, TC=TC_tensor, turn=turn_tensor, step_size=step_size)\n preds = np.argmax(preds, axis=1)\n predicted.extend(preds)\n gold.extend(YD_tensor.data.tolist())\n acc.append(accuracy_score(y_pred=preds, y_true=YD_tensor.data.tolist()))\n total_loss += loss\n k += step_size\n\n print(classification_report(gold, predicted, digits=4))\n return total_loss, np.mean(acc)\n\nif __name__ == '__main__':\n args = parse()\n train(args.expr)\n" ]
[ [ "torch.tensor", "numpy.argmax", "numpy.mean", "torch.device", "sklearn.metrics.classification_report" ] ]
katsu1110/DataScienceComp
[ "86edf7cab0af372624bba7f3026a3db32e7cd1d6" ]
[ "models/run_models.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport gc\nimport re\nfrom typing import List, NoReturn, Union, Tuple, Optional, Text, Generic, Callable, Dict\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder, QuantileTransformer\nfrom sklearn.model_selection import KFold, StratifiedKFold, TimeSeriesSplit\nfrom sklearn.metrics import accuracy_score, roc_auc_score, log_loss, mean_squared_error, mean_absolute_error, f1_score\n\n# model\nimport lightgbm as lgb\n\n# visualize\nimport matplotlib.pyplot as plt\nimport matplotlib.style as style\nimport seaborn as sns\nfrom matplotlib import pyplot\nfrom matplotlib.ticker import ScalarFormatter\nsns.set_context(\"talk\")\nstyle.use('seaborn-colorblind')\n\n# custom\nfrom lgb_param_models import lgb_model\nfrom xgb_param_models import xgb_model\nfrom catb_param_models import catb_model\nfrom lin_param_models import lin_model\nfrom nn_param_models import nn_model\nmypath = os.getcwd()\nsys.path.append(mypath + '/code/')\nfrom train_helper import get_oof_ypred\nfrom cv_methods import GroupKFold, StratifiedGroupKFold\n\nclass RunModel(object):\n \"\"\"\n Model Fitting and Prediction Class:\n\n :INPUTS:\n\n :train_df: train pandas dataframe\n :test_df: test pandas dataframe\n :target: target column name (str)\n :features: list of feature names\n :categoricals: list of categorical feature names. Note that categoricals need to be in 'features'\n :model: 'lgb', 'xgb', 'catb', 'linear', or 'nn'\n :params: dictionary of hyperparameters. If empty dict {} is given, default hyperparams are used\n :task: 'regression', 'multiclass', or 'binary'\n :n_splits: K in KFold (default is 4)\n :cv_method: 'KFold', 'StratifiedKFold', 'TimeSeriesSplit', 'GroupKFold', 'StratifiedGroupKFold'\n :group: group feature name when GroupKFold or StratifiedGroupKFold are used\n :target_encoding: True or False\n :seed: seed (int)\n :scaler: None, 'MinMax', 'Standard'\n :verbose: bool\n\n :EXAMPLE:\n\n # fit LGB regression model\n model = RunModel(train_df, test_df, target, features, categoricals=categoricals,\n model=\"lgb\", params={}, task=\"regression\", n_splits=4, cv_method=\"KFold\", \n group=None, target_encoding=False, seed=1220, scaler=None)\n \n # save predictions on train, test data\n np.save(\"y_pred\", model.y_pred)\n np.save(\"oof\", model.oof)\n \"\"\"\n\n def __init__(self, train_df : pd.DataFrame, test_df : pd.DataFrame, target : str, features : List, categoricals: List=[],\n model : str=\"lgb\", params : Dict={}, task : str=\"regression\", n_splits : int=4, cv_method : str=\"KFold\", \n group : str=None, target_encoding=False, seed : int=1220, scaler : str=None, verbose=True):\n\n # display info\n print(\"##############################\")\n print(f\"Starting training model {model} for a {task} task:\")\n print(f\"- train records: {len(train_df)}, test records: {len(test_df)}\")\n print(f\"- target column is {target}\")\n print(f\"- {len(features)} features with {len(categoricals)} categorical features\")\n if target_encoding:\n print(f\"- target encoding: Applied\")\n else:\n print(f\"- target encoding: NOT Applied\")\n print(f\"- CV strategy : {cv_method} with {n_splits} splits\")\n if group is None:\n print(f\"- no group parameter is used for validation\")\n else:\n print(f\"- {group} as group parameter\")\n if scaler is None:\n print(\"- No scaler is used\")\n else:\n print(f\"- {scaler} scaler is used\")\n print(\"##############################\")\n\n # class initializing setups\n self.train_df = train_df\n 
self.test_df = test_df\n self.target = target\n self.features = features\n self.categoricals = categoricals\n self.model = model\n self.params = params\n self.task = task\n self.n_splits = n_splits\n self.cv_method = cv_method\n self.group = group\n self.target_encoding = target_encoding\n self.seed = seed\n self.scaler = scaler\n self.verbose = verbose\n self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit()\n\n def train_model(self, train_set, val_set):\n \"\"\"\n employ a model\n \"\"\"\n # compile model\n if self.model == \"lgb\": # LGB \n model, fi = lgb_model(self, train_set, val_set)\n\n elif self.model == \"xgb\": # xgb\n model, fi = xgb_model(self, train_set, val_set)\n\n elif self.model == \"catb\": # catboost\n model, fi = catb_model(self, train_set, val_set)\n\n elif self.model == \"linear\": # linear model\n model, fi = lin_model(self, train_set, val_set)\n\n elif self.model == \"nn\": # neural network\n model, fi = nn_model(self, train_set, val_set)\n \n return model, fi # fitted model and feature importance\n\n def convert_dataset(self, x_train, y_train, x_val, y_val):\n \"\"\"\n dataset converter\n \"\"\"\n if (self.model == \"lgb\") & (self.task != \"multiclass\"):\n train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals)\n val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals)\n \n elif (self.model == \"nn\") & (self.task == \"multiclass\"):\n ohe = OneHotEncoder(sparse=False, categories='auto')\n train_set = {'X': x_train, 'y': ohe.fit_transform(y_train.values.reshape(-1, 1))}\n val_set = {'X': x_val, 'y': ohe.transform(y_val.values.reshape(-1, 1))}\n \n else:\n train_set = {'X': x_train, 'y': y_train}\n val_set = {'X': x_val, 'y': y_val}\n \n return train_set, val_set\n\n def calc_metric(self, y_true, y_pred): \n \"\"\"\n calculate evaluation metric for each task\n this may need to be changed based on the metric of interest\n \"\"\"\n if self.task == \"multiclass\":\n return f1_score(y_true, y_pred, average=\"macro\")\n \n elif self.task == \"binary\":\n return roc_auc_score(y_true, y_pred) # log_loss\n \n elif self.task == \"regression\":\n return np.sqrt(mean_squared_error(y_true, y_pred))\n\n def get_cv(self):\n \"\"\"\n employ CV strategy\n \"\"\"\n\n # return cv.split\n if self.cv_method == \"KFold\":\n cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=self.seed)\n return cv.split(self.train_df)\n \n elif self.cv_method == \"StratifiedKFold\":\n cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=self.seed)\n return cv.split(self.train_df, self.train_df[self.target])\n \n elif self.cv_method == \"TimeSeriesSplit\":\n cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits)\n return cv.split(self.train_df)\n \n elif self.cv_method == \"GroupKFold\":\n cv = GroupKFold(n_splits=self.n_splits, shuffle=True, random_state=self.seed)\n return cv.split(self.train_df, self.train_df[self.target], self.group)\n \n elif self.cv_method == \"StratifiedGroupKFold\":\n cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=self.seed)\n return cv.split(self.train_df, self.train_df[self.target], self.group)\n\n def fit(self):\n \"\"\"\n perform model fitting \n \"\"\"\n\n # initialize\n y_vals = np.zeros((self.train_df.shape[0], ))\n if self.task == \"multiclass\":\n n_class = len(np.unique(self.train_df[self.target].values))\n oof_pred = np.zeros((self.train_df.shape[0], n_class))\n y_pred = np.zeros((self.test_df.shape[0], n_class))\n else:\n 
oof_pred = np.zeros((self.train_df.shape[0], ))\n y_pred = np.zeros((self.test_df.shape[0], ))\n\n # group does not kick in when group k fold is used\n if self.group is not None:\n if self.group in self.features:\n self.features.remove(self.group)\n if self.group in self.categoricals:\n self.categoricals.remove(self.group)\n fi = np.zeros((self.n_splits, len(self.features)))\n\n # target encoding\n numerical_features = [f for f in self.features if f not in self.categoricals]\n if self.target_encoding: \n # perform target encoding\n k = 0\n f = 1\n overall_mean = self.train_df[self.target].mean()\n for c in self.categoricals:\n data_tmp = pd.DataFrame({c: self.train_df[c].values, 'target': self.train_df[self.target].values})\n tmp = np.nan * np.ones(self.train_df.shape[0])\n \n cv = self.get_cv()\n for fold, (train_idx, val_idx) in enumerate(cv):\n # target mean\n target_mean = data_tmp.iloc[train_idx].groupby(c)['target'].mean().reset_index() \n \n # smoothing\n target_count = data_tmp.iloc[train_idx].groupby(c)['target'].count().reset_index() \n target_count['target'] = target_count['target'].apply(lambda x : 1 / (1 + np.exp((-x-k) / f)))\n target_mean['target'] = target_mean['target'] * target_count['target'] + (1 - target_count['target']) * overall_mean\n\n # allocate\n tmp[val_idx] = self.train_df[c].iloc[val_idx].map(target_mean.to_dict()).values\n self.train_df[c] = tmp\n \n # replace categorical variable in test\n target_mean = data_tmp.groupby(c)['target'].mean()\n self.test_df.loc[:, c] = self.test_df[c].map(target_mean).values\n \n # no categoricals any more\n numerical_features = self.features.copy()\n self.categoricals = []\n \n # fill nan\n if self.model not in ['lgb', 'catb', 'xgb']:\n # fill NaN (numerical features -> median, categorical features -> mode)\n self.train_df[numerical_features] = self.train_df[numerical_features].replace([np.inf, -np.inf], np.nan)\n self.test_df[numerical_features] = self.test_df[numerical_features].replace([np.inf, -np.inf], np.nan)\n self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median())\n self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median())\n self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0])\n self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0])\n \n # scaling, if necessary\n if self.scaler is not None:\n # to normal\n pt = QuantileTransformer(n_quantiles=100, random_state=self.seed, output_distribution=\"normal\")\n self.train_df[numerical_features] = pt.fit_transform(self.train_df[numerical_features])\n self.test_df[numerical_features] = pt.transform(self.test_df[numerical_features])\n\n # starndardize\n if self.scaler == \"MinMax\":\n scaler = MinMaxScaler()\n elif self.scaler == \"Standard\":\n scaler = StandardScaler()\n self.train_df[numerical_features] = scaler.fit_transform(self.train_df[numerical_features])\n self.test_df[numerical_features] = scaler.transform(self.test_df[numerical_features])\n\n x_test = self.test_df.copy()\n if self.model == \"nn\":\n x_test = [np.absolute(x_test[i]) for i in self.categoricals] + [x_test[numerical_features]]\n else:\n x_test = x_test[self.features]\n else:\n x_test = self.test_df[self.features]\n \n # fitting with out of fold\n cv = self.get_cv()\n for fold, (train_idx, val_idx) in enumerate(cv):\n # train test split\n x_train, 
x_val = self.train_df[self.features].iloc[train_idx], self.train_df[self.features].iloc[val_idx]\n y_train, y_val = self.train_df[self.target].iloc[train_idx], self.train_df[self.target].iloc[val_idx]\n\n if self.model == \"nn\":\n x_train = [np.absolute(x_train[i]) for i in self.categoricals] + [x_train[numerical_features]]\n x_val = [np.absolute(x_val[i]) for i in self.categoricals] + [x_val[numerical_features]]\n\n # model fitting\n train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val)\n model, importance = self.train_model(train_set, val_set)\n fi[fold, :] = importance\n y_vals[val_idx] = y_val\n\n # predictions and check cv score\n oofs, ypred = get_oof_ypred(model, x_val, x_test, self.model, self.task)\n y_pred += ypred.reshape(y_pred.shape) / self.n_splits\n if self.task == \"multiclass\":\n oof_pred[val_idx, :] = oofs.reshape(oof_pred[val_idx, :].shape)\n print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_vals[val_idx], \n np.argmax(oof_pred[val_idx, :], axis=1))))\n else:\n oof_pred[val_idx] = oofs.reshape(oof_pred[val_idx].shape)\n print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_vals[val_idx], \n oof_pred[val_idx])))\n\n # feature importance data frame\n fi_df = pd.DataFrame()\n for n in np.arange(self.n_splits):\n tmp = pd.DataFrame()\n tmp[\"features\"] = self.features\n tmp[\"importance\"] = fi[n, :]\n tmp[\"fold\"] = n\n fi_df = pd.concat([fi_df, tmp], ignore_index=True)\n gfi = fi_df[[\"features\", \"importance\"]].groupby([\"features\"]).mean().reset_index()\n fi_df = fi_df.merge(gfi, on=\"features\", how=\"left\", suffixes=('', '_mean'))\n\n # outputs\n if self.task == \"multiclass\":\n loss_score = self.calc_metric(y_vals, np.argmax(oof_pred, axis=1))\n else:\n loss_score = self.calc_metric(y_vals, oof_pred)\n\n if self.verbose:\n print('Our oof loss score is: ', loss_score)\n return y_pred, loss_score, model, oof_pred, y_vals, fi_df\n\n def plot_feature_importance(self, rank_range=[1, 50]):\n \"\"\"\n function for plotting feature importance (nothing is returned when the model is NN)\n\n :EXAMPLE:\n # fit LGB regression model\n model = RunModel(train_df, test_df, target, features, categoricals=categoricals,\n model=\"lgb\", task=\"regression\", n_splits=4, cv_method=\"KFold\", \n group=None, seed=1220, scaler=None)\n \n # plot \n fi_df = model.plot_feature_importance(rank_range=[1, 100])\n \n \"\"\"\n # plot feature importance\n _, ax = plt.subplots(1, 1, figsize=(10, 20))\n sorted_df = self.fi_df.sort_values(by = \"importance_mean\", ascending=False).reset_index().iloc[self.n_splits * (rank_range[0]-1) : self.n_splits * rank_range[1]]\n sns.barplot(data=sorted_df, x =\"importance\", y =\"features\", orient='h')\n ax.set_xlabel(\"feature importance\")\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n return sorted_df" ]
[ [ "sklearn.metrics.roc_auc_score", "sklearn.preprocessing.QuantileTransformer", "pandas.DataFrame", "sklearn.model_selection.KFold", "sklearn.metrics.mean_squared_error", "sklearn.metrics.f1_score", "numpy.exp", "sklearn.preprocessing.MinMaxScaler", "matplotlib.style.use", "numpy.unique", "numpy.arange", "sklearn.model_selection.StratifiedKFold", "sklearn.model_selection.TimeSeriesSplit", "numpy.argmax", "numpy.zeros", "pandas.concat", "numpy.absolute", "sklearn.preprocessing.OneHotEncoder", "matplotlib.pyplot.subplots", "numpy.ones", "sklearn.preprocessing.StandardScaler" ] ]
charleyjoo/myproject
[ "e0b04156e0148cf8c86768c73f66fa6b5795c0d4" ]
[ "myproject/pronto_utils.py" ]
[ "import wget\nimport os\n\nimport zipfile\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn; seaborn.set()\n\ndef download_if_needed(URL, filename):\n \"\"\"\n Download from URL TO FILENAME UNLESS FILENAME ALREADY EXISTS\n \"\"\"\n if os.path.exists(filename):\n print(filename, \"already exists.\")\n return\n else:\n print(\"Downloading...\")\n wget.download(URL)\n\ndef get_pronto_data():\n \"\"\"\n Download pronto data, unless already downloaded\n \"\"\"\n download_if_needed('https://s3.amazonaws.com/pronto-data/open_data_year_one.zip','open_data_year_one.zip')\n\n\ndef get_trip_data():\n \"\"\"\n Fetch pronto data (if needed) and extract trip data from the zip file\n \"\"\"\n get_pronto_data()\n zf = zipfile.ZipFile('open_data_year_one.zip')\n file_handle = zf.open('2015_trip_data.csv')\n return pd.read_csv(file_handle)\n\n\ndef get_weather_data():\n \"\"\"\n Get weather data from the zip file\n \"\"\"\n get_pronto_data()\n zf = zipfile.ZipFile('open_data_year_one.zip')\n file_handle = zf.open('2015_weather_data.csv')\n return pd.read_csv(file_handle)\n\ndef get_trip_and_weather():\n trip = get_trip_data()\n weather = get_weather_data()\n date = pd.DatetimeIndex(trip['starttime'])\n trips_by_date = trip.pivot_table('trip_id',aggfunc='count',index=date.date,columns='usertype')\n weather = weather.set_index('Date')\n weather.index = pd.DatetimeIndex(weather.index)\n weather = weather.iloc[:-1]\n return weather.join(trips_by_date)\n\ndef plot_daily_totals():\n data = get_trip_and_weather()\n fig, ax = plt.subplots(2,figsize=(14,6),sharex = True)\n data['Annual Member'].plot(ax = ax[0], title='Annual Member')\n data['Short-Term Pass Holder'].plot(ax = ax[1], title='Short-Term Pass Holder')\n fig.savefig('trips_by_day.png')\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.subplots", "pandas.DatetimeIndex" ] ]
rotskoff-group/dissipative-design
[ "8335a4e23ccc0eba1771d4b78f5102ee6113664c" ]
[ "lj_system/lj.py" ]
[ "from simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom sys import stdout\nfrom scipy.stats import gamma\nimport PIL\nimport freud\nimport fresnel\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mdtraj.reporters import HDF5Reporter\nimport time\n\n\nclass LJ:\n def __init__(self, filename, region_num=15, target_dist=\"default_gamma\"):\n self.num_particles = 100\n self.dim_length = 30\n self.filename = filename\n self.dimensions = 2\n self.dt = 0.0002\n self.invdt = int(1 / self.dt)\n self.target_dist = target_dist\n self.bin, self.q = self._init_target_distribution(\n dist=self.target_dist)\n self.num_bins = len(self.bin)\n self.system = self._init_system()\n self.integrator = self._init_integrator()\n self.simulation = self._init_simulation()\n self.region_num = region_num # Along 1 dimension\n self.region_int = np.linspace(0, self.dim_length, self.region_num + 1)\n self.region_temp = np.ones((self.region_num, self.region_num)) * 1.2\n\n def _init_target_distribution(self, dist=\"default_gamma\"):\n \"\"\"Initializes the target distribution\n\n Args:\n dist: The name of the target distribution\n Returns:\n bin: The positions of the endpoints of each bin. Width of each bin\n is used to calculate probability\n q: The height of each bin\n Raises:\n ValueError: If inputted distribution is not found\n \"\"\"\n if (dist == \"default_gamma\"):\n bin = [1, 2, 3, 4, 5, 6, 7, 8, 9] # Regular\n target_data = np.random.gamma(16, 0.25, 10000000)\n q = plt.hist(target_data, bins=(bin + [100]), density=True)\n plt.close()\n else:\n raise ValueError(\"Dist supplied not defined\")\n return bin, q\n\n def plot_target_distribution(self, dist=\"default_gamma\"):\n \"\"\"\n Plots target distribution\n Args:\n dist: The name of the target distribution\n Raises:\n ValueError: If inputted distribution is not found\n \"\"\"\n if (dist == \"default_gamma\"):\n plt.plot(np.linspace(0, 10, 500), gamma.pdf(\n np.linspace(0, 10, 500), a=16, scale=0.25))\n else:\n raise ValueError(\"Dist supplied not defined\")\n\n def _init_position(self):\n \"\"\"Initializes positions on a lattice\n\n Returns:\n Array of particle positions.\n \"\"\"\n num_per_dim = round(((self.num_particles)**(1 / self.dimensions))\n + 0.5)\n lattice_spacing = self.dim_length / num_per_dim\n particle_position = self.num_particles * [0]\n for i in range(self.num_particles):\n x = i % num_per_dim\n y = i // num_per_dim\n x_pos = lattice_spacing * (x + 0.5 * (y % 2))\n y_pos = lattice_spacing * y\n particle_position[i] = Vec3(x_pos, y_pos, 0)\n\n return particle_position\n\n def _init_system(self):\n \"\"\"Initializes an OpenMM system\n\n Returns:\n Initialized OpenMM System\n \"\"\"\n\n a = Quantity((self.dim_length * nanometer,\n 0 * nanometer, 0 * nanometer))\n b = Quantity((0 * nanometer, self.dim_length *\n nanometer, 0 * nanometer))\n c = Quantity((0 * nanometer, 0 * nanometer,\n self.dim_length * nanometer))\n system = System()\n system.setDefaultPeriodicBoxVectors(a, b, c)\n\n sigma = 1 * nanometer\n epsilon = 0.5 * kilojoule_per_mole\n cutoff_type = NonbondedForce.CutoffPeriodic\n\n lj = CustomNonbondedForce(\"4*epsilon*(((sigma/r)^12-(sigma/r)^6))\")\n lj.addGlobalParameter(\"sigma\", sigma)\n lj.addGlobalParameter(\"epsilon\", epsilon)\n lj.setCutoffDistance(15 * sigma)\n lj.setNonbondedMethod(cutoff_type)\n\n for particle_index in range(self.num_particles):\n system.addParticle(2 * amu)\n lj.addParticle()\n\n system.addForce(lj)\n\n return system\n\n def _init_integrator(self):\n 
\"\"\"Initializes an OpenMM Integrator\n\n Returns:\n Initialized OpenMM Integrator\n \"\"\"\n\n lj_integrator = CustomIntegrator(self.dt)\n lj_integrator.addGlobalVariable(\"box_length\", self.dim_length)\n lj_integrator.addPerDofVariable(\"D_t\", 1.2)\n lj_integrator.addPerDofVariable(\"dissipation\", 0)\n lj_integrator.addPerDofVariable(\"x_dot\", 0)\n lj_integrator.addPerDofVariable(\"total_force\", 0)\n\n lj_integrator.addComputePerDof(\"x_dot\", \"x\")\n lj_integrator.addComputePerDof(\"total_force\", \"f\")\n lj_integrator.addComputePerDof(\"x\", \"x + dt*(f) + \\\n gaussian * sqrt(2 * D_t * dt)\")\n\n lj_integrator.addComputePerDof(\"x\", \"vector(_x(x), _y(x), 0)\")\n lj_integrator.addComputePerDof(\"x_dot\", \"x - x_dot\")\n lj_integrator.addComputePerDof(\"x_dot\", \"x_dot + step(x_dot - 0.5*box_length)*(-0.5*box_length)\")\n lj_integrator.addComputePerDof(\"x_dot\", \"x_dot + step(-(x_dot + 0.5*box_length))*(0.5*box_length)\")\n lj_integrator.addComputePerDof(\n \"dissipation\", \"dissipation + (dot(x_dot, total_force)/D_t)\")\n\n lj_integrator.addUpdateContextState()\n return lj_integrator\n\n def _init_simulation(self):\n \"\"\"Initializes an OpenMM Simulation\n\n Returns:\n Initialized OpenMM Simulation\n \"\"\"\n topology = Topology()\n element = Element.getBySymbol('H')\n chain = topology.addChain()\n for particle in range(self.num_particles):\n residue = topology.addResidue('lj', chain)\n topology.addAtom('lj', element, residue)\n topology.setUnitCellDimensions(\n Quantity(3 * [self.dim_length], nanometer))\n simulation = Simulation(topology, self.system, self.integrator)\n # simulation.context.getPlatform().\\\n # setPropertyDefaultValue(\"CudaDeviceIndex\", \"0\")\n simulation.context.setPositions(self._init_position())\n simulation.reporters.append(\n HDF5Reporter(self.filename, self.invdt // 100))\n return simulation\n\n def _get_region_temperature(self, particle_pos):\n \"\"\"For a given particle position returns temperature of the region that\n particle is in\n\n Returns:\n Temperature of region particle is in\n \"\"\"\n x_in = np.sum([self.region_int < particle_pos[0]]) - 1\n y_in = np.sum([self.region_int > particle_pos[1]]) - 1\n return self.region_temp[y_in, x_in]\n\n def _update_regions(self):\n \"\"\"Updates temperature of all particles based on the region it is in\n \"\"\"\n positions = self.simulation.context.getState(\n getPositions=True, enforcePeriodicBox=True).getPositions()\n all_particle_temps = [self._get_region_temperature(x._value)\n for x in positions]\n temp_vec = [Vec3(particle_i_temp, particle_i_temp, 0)\n for particle_i_temp in all_particle_temps]\n\n self.simulation.integrator.setPerDofVariableByName(\"D_t\",\n temp_vec)\n\n def _color_cluster(self, positions, cl, tag):\n \"\"\"Renders and saves an image of all clusters of size greater than 2\n Args:\n positions: positions of the particles as a 2D List\n cl: A freud.cluster.Cluster() object of computed clusters\n tag: A string describing the end of the filename of the rendered image\n \"\"\"\n\n colors = np.empty((self.num_particles, 3))\n colors[:, :] = fresnel.color.linear([0, 0, 1])\n max = np.max(cl.cluster_idx)\n for i in range(max, 0, -1):\n if (np.sum(cl.cluster_idx == i) > 2):\n break\n colors[cl.cluster_idx == i, :] = fresnel.color.linear([1, 1, 1])\n scene = fresnel.Scene()\n\n # Spheres for every particle in the system\n geometry = fresnel.geometry.Sphere(scene, N=self.num_particles,\n radius=0.5)\n positions = [[pos - (self.dim_length / 2) for pos in row]\n for row in positions] # 
Change left cordinate from 0 to -self.dim_length/2\n geometry.position[:] = positions\n geometry.material = fresnel.material.Material(roughness=0.9)\n geometry.outline_width = 0.05\n # use color instead of material.color\n geometry.material.primitive_color_mix = 1.0\n geometry.color[:] = fresnel.color.linear(colors)\n box = freud.box.Box.square(L=self.dim_length)\n fresnel.geometry.Box(scene, box, box_radius=.1)\n\n scene.lights = fresnel.light.ring()\n out = fresnel.pathtrace(scene, light_samples=1)\n image = PIL.Image.fromarray(out[:], mode='RGBA')\n filename_clusters = self.filename[:-3] + tag + \"_color.png\"\n image.save(filename_clusters)\n\n def update_temperature(self, new_temp, tag=\"\"):\n \"\"\"Updates self.D_t to be new_temp and saves heatmap of region temperatures\n Args:\n new_temp: 1D (flattened) array of temperatures of regions\n tag: A string describing the end of the filename of the temperature heatmap\n \"\"\"\n if (not len(new_temp) == (self.region_num ** 2)):\n raise ValueError(\"Incorrect Action Length\")\n self.region_temp = np.array(new_temp).reshape(\n (self.region_num, self.region_num))\n if np.any((self.region_temp <= 0) | (self.region_temp > 2.0)):\n raise ValueError(\"Unallowed Temperatures Inputted\")\n plt.imshow(self.region_temp, cmap=\"Greys\", vmin=0., vmax=2.)\n plt.colorbar()\n filename = self.filename[:-3] + tag + \"_temperature.png\"\n plt.savefig(filename)\n plt.close()\n\n def _run_sim(self, time):\n \"\"\"Runs a simulation for time seconds\n Args:\n time: number of seconds to run simulation\n \"\"\"\n total_sim_time = int(time * self.invdt)\n self.simulation.step(total_sim_time)\n\n def _get_KL(self, p):\n \"\"\"Calculates KL Div from target_distribution to p\n Args:\n p: A normalized distribution of cluster sizes\n Returns:\n KL divergence from target_distribution to p or None if p is None\n Raises:\n ValueError: If q does not have full support over sample space\n \"\"\"\n\n if p is None:\n return None\n sum = 0\n ss_len = len(self.q[0])\n for i in range(ss_len):\n p_i = p[0][i] * (p[1][i + 1] - p[1][i])\n q_i = self.q[0][i] * (self.q[1][i + 1] - self.q[1][i])\n try:\n if (p_i == 0):\n continue\n sum += p_i * np.log(p_i / q_i)\n except:\n raise ValueError(\"Define q with support over sample space\")\n return sum\n\n\n def _duplicate_element_by_val(self, count):\n \"\"\"Duplicates elements by current value. Use to get number of particles per cluster\n E.g. Given an input of [1, 2, 3] it will return [1, 2, 2, 3, 3, 3]\n Args:\n count: A List of all cluster sizes\n Returns:\n A List of the cluster size that each particle belongs to\n or None if the input list is empty (i.e. 
no clusters present)\n \"\"\"\n dup_count = []\n for val in count:\n dup_count += [val] * val\n if (len(dup_count) == 0):\n \"\"\"\n Return None for regions without any particles\n \"\"\"\n return None\n return dup_count\n\n def _get_cluster_distribution(self, tag):\n \"\"\"Gets the distribution of clusters for each region\n Args:\n tag: A string describing the end of the filename\n Returns:\n p: 2D list of normalized distribution of cluster sizes in the entire system\n cs_region: A 3D List of all cluster sizes in each region\n \"\"\"\n\n cl = freud.cluster.Cluster()\n box = freud.box.Box.square(L=self.dim_length)\n positions = self.simulation.context.getState(\n getPositions=True, enforcePeriodicBox=True).getPositions()\n positions = [list(x) for x in positions._value] # Convert to 2D list\n cl.compute((box, positions), neighbors={'r_max': 1.25}) # In nm\n index, counts = np.unique(cl.cluster_idx, return_counts=True)\n cs_region = [[[] for i in range(self.region_num)]\n for j in range(self.region_num)]\n for p_i in range(self.num_particles):\n particle_pos = positions[p_i]\n x_in = np.sum([self.region_int < particle_pos[0]]) - 1\n y_in = np.sum([self.region_int > particle_pos[1]]) - 1\n current_cluster_index = cl.cluster_idx[p_i]\n # Get all the unique cluster indices in each region\n if current_cluster_index not in cs_region[y_in][x_in]:\n cs_region[y_in][x_in].append(current_cluster_index)\n\n # Get all the cluster sizes in each region\n cs_region = [[counts[cs_region[i][j]]\n for j in range(self.region_num)]\n for i in range(self.region_num)]\n\n # Get all the particles in a cluster sizes in each region\n cs_region = [[self._duplicate_element_by_val(cs_region[i][j])\n for j in range(self.region_num)]\n for i in range(self.region_num)]\n\n p = [[None if cs_region[i][j] is None else plt.hist(cs_region[i][j],\n bins=self.bin +\n [max(\n max(cs_region[i][j]), self.bin[-1] + 1)],\n density=True)\n for j in range(self.region_num)]\n for i in range(self.region_num)]\n plt.close()\n return p, cs_region\n\n def _get_cluster_distribution_all(self, tag):\n \"\"\"Gets the cluster distribution of the entire system (not individual grids)\n Args:\n tag: A string describing the end of the filename\n Returns:\n p: normalized distribution of cluster sizes in the entire system\n counts: A List of all cluster sizes in the entire system\n \"\"\"\n cl = freud.cluster.Cluster()\n box = freud.box.Box.square(L=self.dim_length)\n positions = self.simulation.context.getState(\n getPositions=True, enforcePeriodicBox=True).getPositions()\n positions = [list(x) for x in positions._value] # Convert to 2D list\n cl.compute((box, positions), neighbors={'r_max': 1.25}) # In nm\n index, counts = np.unique(cl.cluster_idx, return_counts=True)\n counts = self._duplicate_element_by_val(counts)\n p = plt.hist(counts, bins=self.bin +\n [max(np.max(counts), self.bin[-1] + 1)], density=True)\n self.plot_target_distribution(dist=self.target_dist)\n filename = self.filename[:-3] + tag + \".png\"\n plt.savefig(filename)\n plt.close()\n self._color_cluster(positions, cl, tag)\n return p, counts\n\n def get_state_reward(self, tag):\n \"\"\"Returns the current state, reward, and list of cluster sizes of each region\n Args:\n tag: A string describing the end of the filename\n Returns:\n dist: 2D list of normalized distribution of cluster sizes in the entire system\n reward: A 2D list of the KL divergence in each region\n cs_region: A 3D List of all cluster sizes in each region\n \"\"\"\n p, cs_region = 
self._get_cluster_distribution(tag)\n reward = []\n dist = []\n for i in range(self.region_num):\n for j in range(self.region_num):\n reward.append(self._get_KL(p[i][j]))\n if (p[i][j] is None):\n dist.append(None)\n else:\n curr_dist = p[i][j][0] * np.diff(p[i][j][1])\n dist.append(curr_dist.tolist())\n return [dist, reward, cs_region]\n\n def get_state_reward_all(self, tag):\n \"\"\"Returns the current state, reward, and list of the entire system\n Args:\n tag: A string describing the end of the filename\n Returns:\n dist: list of normalized distribution of cluster sizes in the entire system\n reward: KL divergence of entire system\n cs_region: A List of all cluster sizes in entire system\n \"\"\"\n p, counts = self._get_cluster_distribution_all(tag)\n reward = self._get_KL(p)\n dist = p[0] * np.diff(p[1])\n state = dist.tolist()\n return [state, reward, counts]\n\n def run_decorrelation(self, time, tag):\n \"\"\"Runs a decorrelation step of high temperature to \"decorrelate\" from some current state\n Args:\n time: time in seconds to run decorrelation\n tag: A string describing the end of the filename\n \"\"\"\n new_temp = [1.2] * self.region_num**2\n self.update_temperature(new_temp, tag)\n self._update_regions()\n self._run_sim(time)\n\n def run_step(self, is_detailed=False, tag=\"\"):\n \"\"\"Runs simulation for one time \"step\" (i.e. decision) of RL algorithm\n Updates particle activity every 0.25 seconds based on what region particle\n is in. Runs for a total of 0.25 seconds (i.e. 1 decision)\n Args:\n is_detailed: Include information about states/rewards of entire system\n tag: A string describing the end of the filename\n Returns:\n The states, rewards and cluster sizes of the system if is _detailed\n None, None, None if not (is_detailed)\n \"\"\"\n all_system_rewards = []\n all_system_states = []\n all_system_states_cluster = []\n for i in range(1):\n # Updating once every second\n self._update_regions()\n self._run_sim(0.25)\n if (is_detailed):\n curr_tag = tag + \"_\" + str(i)\n system_state, system_reward, system_cluster_counts = self.get_state_reward_all(\n tag)\n all_system_states.append(system_state)\n all_system_rewards.append(system_reward)\n all_system_states_cluster.append(system_cluster_counts)\n if (is_detailed):\n return all_system_states, all_system_rewards, all_system_states_cluster\n else:\n return None, None, None\n\n def reset_context(self, filename):\n \"\"\"Resets position to lattice and closes h5 file\n Args:\n filename: file to save new trajectory in\n \"\"\"\n\n self.filename = filename\n self.simulation.reporters[0].close()\n self.simulation.reporters[0] = HDF5Reporter(\n self.filename, self.invdt // 100)\n self.simulation.context.setPositions(self._init_position())\n\n def get_dissipation(self):\n \"\"\"Gets dissipation of simulation\n Returns:\n Mean total dissipation across all particles\n \"\"\"\n dissipation = self.simulation.integrator.getPerDofVariableByName(\n \"dissipation\")\n dissipation = np.array([d_n[0] for d_n in dissipation])\n return np.mean(dissipation)\n\n\nif __name__ == \"__main__\":\n lj = LJ(\"test.h5\")\n lj.run_step()\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.log", "numpy.sum", "numpy.linspace", "numpy.unique", "matplotlib.pyplot.savefig", "numpy.ones", "numpy.max", "matplotlib.pyplot.colorbar", "numpy.mean", "numpy.any", "matplotlib.pyplot.close", "numpy.random.gamma", "numpy.diff", "numpy.array", "matplotlib.pyplot.hist", "numpy.empty" ] ]
doansangg/CGAN-PyTorch
[ "941f5bd75102bed7f2eccd7feb9af8e6134af0e4" ]
[ "dataloader.py" ]
[ "import os\nfrom PIL import Image\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport numpy as np\nimport random\nimport torch\nfrom torch import Tensor, int32\n\nclass Dataset(data.Dataset):\n \"\"\"\n dataloader for polyp segmentation tasks\n \"\"\"\n def __init__(self, path_data, trainsize, augmentations):\n self.trainsize = trainsize\n self.augmentations = augmentations\n print(self.augmentations)\n # fix file train\n self.data_raw = open(path_data,\"r\")\n self.data_raw=self.data_raw.readlines()\n #print(self.data_raw)\n self.images=[p.split('\\t')[0] for p in self.data_raw]\n self.labels=[int(p.split('\\t')[1].split('\\n')[0]) for p in self.data_raw]\n #print(self.labels)\n #self.labels=[1 for p in self.data_raw]\n self.labels=torch.LongTensor(self.labels)\n #print(self.labels)\n #self.filter_files()\n self.size = len(self.images)\n self.img_transform = transforms.Compose([\n transforms.Resize((self.trainsize, self.trainsize)),\n transforms.ToTensor()])\n \n\n def __getitem__(self, index):\n \n image = self.rgb_loader(self.images[index])\n #gt = self.binary_loader(self.gts[index])\n label=self.labels[index]\n seed = np.random.randint(2147483647) # make a seed with numpy generator \n random.seed(seed) # apply this seed to img tranfsorms\n torch.manual_seed(seed) # needed for torchvision 0.7\n if self.img_transform is not None:\n image = self.img_transform(image)\n \n random.seed(seed) # apply this seed to img tranfsorms\n torch.manual_seed(seed) # needed for torchvision 0.7\n # if self.gt_transform is not None:\n # gt = self.gt_transform(gt)\n return (image, label)\n\n def filter_files(self):\n assert len(self.images) == len(self.labels)\n images = []\n labels = []\n for img_path, label in zip(self.images, self.labels):\n images.append(img_path)\n labels.append(label)\n self.images = images\n self.labels = labels\n\n def rgb_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\n def resize(self, img,label):\n return (img.resize((self.trainsize, self.trainsize), Image.BILINEAR),label)\n \n def __len__(self):\n return self.size\n\n\ndef get_loader(path_root, batchsize, trainsize, shuffle=True, num_workers=4, pin_memory=True, augmentation=False):\n\n dataset = Dataset(path_root, trainsize, augmentation)\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batchsize,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=pin_memory)\n return data_loader\n\n\n" ]
[ [ "torch.manual_seed", "torch.LongTensor", "torch.utils.data.DataLoader", "numpy.random.randint" ] ]
joel-simon/Antimander
[ "ec2058a76f172e0941d5e4558776831050a00c2a" ]
[ "optimize/test/draw_random.py" ]
[ "import os, sys, random, time\nimport pygame\nfrom pygame import gfxdraw\nimport numpy as np\nsys.path.append(os.path.abspath('.'))\nfrom src.state import State\nfrom src import districts, mutation, metrics\nfrom src.connectivity import can_lose\nfrom src.constraints import fix_pop_equality\nfrom src.draw import draw_districts\n\n\n# state = State.fromFile('data/t500-c3.json')\nstate = State.makeRandom(400, seed=1)\nfor _ in range(1):\n state, _ = state.contract(seed=0)\n\n# met = metrics.compactness_convex_hull\nmet = metrics.polsby_popper\nmutate = False\nn_districts = 5\n\ndistricts = districts.make_random(state, n_districts)\n# districts = np.random.randint(0, n_districts, (state.n_tiles,), dtype='i')\ntolerance = 0.5\ndraw_kwargs = {\n \"draw_bounding_hulls\": False,\n \"draw_bounding_circles\": False,\n \"draw_district_edges\": True,\n \"draw_vertices\": False,\n \"draw_neigbors_lines\": False\n}\n\n# print(fix_pop_equality(state, districts, n_districts, tolerance=tolerance, max_iters=1000))\npygame.init()\nw, h = (1200, 1200)\nscreen = pygame.display.set_mode((w, h))\nscreen.fill((255, 255, 255))\ncolors = np.random.randint(0, 255, (n_districts, 3))\n\ndraw_districts(state, districts, n_districts, screen, colors, **draw_kwargs)\npygame.display.update()\nstep = 0\n\nwhile True:\n if mutate:\n d2 = districts.copy()\n\n n_pop = np.sum(state.tile_populations)\n ideal_pop = n_pop / n_districts\n pop_max = ideal_pop * (1+tolerance)\n pop_min = ideal_pop * (1-tolerance)\n\n mutation.mutate(d2, n_districts, state, 0.00, pop_min, pop_max)\n\n new_fitness = met(state, d2, n_districts)\n\n if new_fitness < met(state, districts, n_districts):\n print('\\nnew_fitness', step, new_fitness)\n districts = d2\n draw_districts(state, districts, n_districts, screen, colors, **draw_kwargs)\n pygame.display.update()\n else:\n pass\n # print('.', end = '')\n\n step += 1\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n quit()\n # elif event.key == pygame.K_LEFT:\n # p.mutate()\n # draw_districts(state, districts, n_districts, screen, display)\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n" ]
[ [ "numpy.sum", "numpy.random.randint" ] ]
chohy/cho_gem5
[ "1207718477576053ee6222faff03dd888a90dbcf" ]
[ "ext/pybind11/tests/test_numpy_array.py" ]
[ "import pytest\nimport gc\n\nwith pytest.suppress(ImportError):\n import numpy as np\n\n\[email protected](scope='function')\ndef arr():\n return np.array([[1, 2, 3], [4, 5, 6]], '<u2')\n\n\[email protected]_numpy\ndef test_array_attributes():\n from pybind11_tests.array import (\n ndim, shape, strides, writeable, size, itemsize, nbytes, owndata\n )\n\n a = np.array(0, 'f8')\n assert ndim(a) == 0\n assert all(shape(a) == [])\n assert all(strides(a) == [])\n with pytest.raises(IndexError) as excinfo:\n shape(a, 0)\n assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'\n with pytest.raises(IndexError) as excinfo:\n strides(a, 0)\n assert str(excinfo.value) == 'invalid axis: 0 (ndim = 0)'\n assert writeable(a)\n assert size(a) == 1\n assert itemsize(a) == 8\n assert nbytes(a) == 8\n assert owndata(a)\n\n a = np.array([[1, 2, 3], [4, 5, 6]], 'u2').view()\n a.flags.writeable = False\n assert ndim(a) == 2\n assert all(shape(a) == [2, 3])\n assert shape(a, 0) == 2\n assert shape(a, 1) == 3\n assert all(strides(a) == [6, 2])\n assert strides(a, 0) == 6\n assert strides(a, 1) == 2\n with pytest.raises(IndexError) as excinfo:\n shape(a, 2)\n assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'\n with pytest.raises(IndexError) as excinfo:\n strides(a, 2)\n assert str(excinfo.value) == 'invalid axis: 2 (ndim = 2)'\n assert not writeable(a)\n assert size(a) == 6\n assert itemsize(a) == 2\n assert nbytes(a) == 12\n assert not owndata(a)\n\n\[email protected]_numpy\[email protected]('args, ret', [([], 0), ([0], 0), ([1], 3), ([0, 1], 1), ([1, 2], 5)])\ndef test_index_offset(arr, args, ret):\n from pybind11_tests.array import index_at, index_at_t, offset_at, offset_at_t\n assert index_at(arr, *args) == ret\n assert index_at_t(arr, *args) == ret\n assert offset_at(arr, *args) == ret * arr.dtype.itemsize\n assert offset_at_t(arr, *args) == ret * arr.dtype.itemsize\n\n\[email protected]_numpy\ndef test_dim_check_fail(arr):\n from pybind11_tests.array import (index_at, index_at_t, offset_at, offset_at_t, data, data_t,\n mutate_data, mutate_data_t)\n for func in (index_at, index_at_t, offset_at, offset_at_t, data, data_t,\n mutate_data, mutate_data_t):\n with pytest.raises(IndexError) as excinfo:\n func(arr, 1, 2, 3)\n assert str(excinfo.value) == 'too many indices for an array: 3 (ndim = 2)'\n\n\[email protected]_numpy\[email protected]('args, ret',\n [([], [1, 2, 3, 4, 5, 6]),\n ([1], [4, 5, 6]),\n ([0, 1], [2, 3, 4, 5, 6]),\n ([1, 2], [6])])\ndef test_data(arr, args, ret):\n from pybind11_tests.array import data, data_t\n assert all(data_t(arr, *args) == ret)\n assert all(data(arr, *args)[::2] == ret)\n assert all(data(arr, *args)[1::2] == 0)\n\n\[email protected]_numpy\ndef test_mutate_readonly(arr):\n from pybind11_tests.array import mutate_data, mutate_data_t, mutate_at_t\n arr.flags.writeable = False\n for func, args in (mutate_data, ()), (mutate_data_t, ()), (mutate_at_t, (0, 0)):\n with pytest.raises(RuntimeError) as excinfo:\n func(arr, *args)\n assert str(excinfo.value) == 'array is not writeable'\n\n\[email protected]_numpy\[email protected]('dim', [0, 1, 3])\ndef test_at_fail(arr, dim):\n from pybind11_tests.array import at_t, mutate_at_t\n for func in at_t, mutate_at_t:\n with pytest.raises(IndexError) as excinfo:\n func(arr, *([0] * dim))\n assert str(excinfo.value) == 'index dimension mismatch: {} (ndim = 2)'.format(dim)\n\n\[email protected]_numpy\ndef test_at(arr):\n from pybind11_tests.array import at_t, mutate_at_t\n\n assert at_t(arr, 0, 2) == 3\n assert at_t(arr, 1, 0) == 
4\n\n assert all(mutate_at_t(arr, 0, 2).ravel() == [1, 2, 4, 4, 5, 6])\n assert all(mutate_at_t(arr, 1, 0).ravel() == [1, 2, 4, 5, 5, 6])\n\n\[email protected]_numpy\ndef test_mutate_data(arr):\n from pybind11_tests.array import mutate_data, mutate_data_t\n\n assert all(mutate_data(arr).ravel() == [2, 4, 6, 8, 10, 12])\n assert all(mutate_data(arr).ravel() == [4, 8, 12, 16, 20, 24])\n assert all(mutate_data(arr, 1).ravel() == [4, 8, 12, 32, 40, 48])\n assert all(mutate_data(arr, 0, 1).ravel() == [4, 16, 24, 64, 80, 96])\n assert all(mutate_data(arr, 1, 2).ravel() == [4, 16, 24, 64, 80, 192])\n\n assert all(mutate_data_t(arr).ravel() == [5, 17, 25, 65, 81, 193])\n assert all(mutate_data_t(arr).ravel() == [6, 18, 26, 66, 82, 194])\n assert all(mutate_data_t(arr, 1).ravel() == [6, 18, 26, 67, 83, 195])\n assert all(mutate_data_t(arr, 0, 1).ravel() == [6, 19, 27, 68, 84, 196])\n assert all(mutate_data_t(arr, 1, 2).ravel() == [6, 19, 27, 68, 84, 197])\n\n\[email protected]_numpy\ndef test_bounds_check(arr):\n from pybind11_tests.array import (index_at, index_at_t, data, data_t,\n mutate_data, mutate_data_t, at_t, mutate_at_t)\n funcs = (index_at, index_at_t, data, data_t,\n mutate_data, mutate_data_t, at_t, mutate_at_t)\n for func in funcs:\n with pytest.raises(IndexError) as excinfo:\n func(arr, 2, 0)\n assert str(excinfo.value) == 'index 2 is out of bounds for axis 0 with size 2'\n with pytest.raises(IndexError) as excinfo:\n func(arr, 0, 4)\n assert str(excinfo.value) == 'index 4 is out of bounds for axis 1 with size 3'\n\n\[email protected]_numpy\ndef test_make_c_f_array():\n from pybind11_tests.array import (\n make_c_array, make_f_array\n )\n assert make_c_array().flags.c_contiguous\n assert not make_c_array().flags.f_contiguous\n assert make_f_array().flags.f_contiguous\n assert not make_f_array().flags.c_contiguous\n\n\[email protected]_numpy\ndef test_wrap():\n from pybind11_tests.array import wrap\n\n def assert_references(a, b):\n assert a is not b\n assert a.__array_interface__['data'][0] == b.__array_interface__['data'][0]\n assert a.shape == b.shape\n assert a.strides == b.strides\n assert a.flags.c_contiguous == b.flags.c_contiguous\n assert a.flags.f_contiguous == b.flags.f_contiguous\n assert a.flags.writeable == b.flags.writeable\n assert a.flags.aligned == b.flags.aligned\n assert a.flags.updateifcopy == b.flags.updateifcopy\n assert np.all(a == b)\n assert not b.flags.owndata\n assert b.base is a\n if a.flags.writeable and a.ndim == 2:\n a[0, 0] = 1234\n assert b[0, 0] == 1234\n\n a1 = np.array([1, 2], dtype=np.int16)\n assert a1.flags.owndata and a1.base is None\n a2 = wrap(a1)\n assert_references(a1, a2)\n\n a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='F')\n assert a1.flags.owndata and a1.base is None\n a2 = wrap(a1)\n assert_references(a1, a2)\n\n a1 = np.array([[1, 2], [3, 4]], dtype=np.float32, order='C')\n a1.flags.writeable = False\n a2 = wrap(a1)\n assert_references(a1, a2)\n\n a1 = np.random.random((4, 4, 4))\n a2 = wrap(a1)\n assert_references(a1, a2)\n\n a1 = a1.transpose()\n a2 = wrap(a1)\n assert_references(a1, a2)\n\n a1 = a1.diagonal()\n a2 = wrap(a1)\n assert_references(a1, a2)\n\n\[email protected]_numpy\ndef test_numpy_view(capture):\n from pybind11_tests.array import ArrayClass\n with capture:\n ac = ArrayClass()\n ac_view_1 = ac.numpy_view()\n ac_view_2 = ac.numpy_view()\n assert np.all(ac_view_1 == np.array([1, 2], dtype=np.int32))\n del ac\n gc.collect()\n assert capture == \"\"\"\n ArrayClass()\n ArrayClass::numpy_view()\n 
ArrayClass::numpy_view()\n \"\"\"\n ac_view_1[0] = 4\n ac_view_1[1] = 3\n assert ac_view_2[0] == 4\n assert ac_view_2[1] == 3\n with capture:\n del ac_view_1\n del ac_view_2\n gc.collect()\n assert capture == \"\"\"\n ~ArrayClass()\n \"\"\"\n\n\[email protected]_numpy\ndef test_cast_numpy_int64_to_uint64():\n from pybind11_tests.array import function_taking_uint64\n function_taking_uint64(123)\n function_taking_uint64(np.uint64(123))\n\n\[email protected]_numpy\ndef test_isinstance():\n from pybind11_tests.array import isinstance_untyped, isinstance_typed\n\n assert isinstance_untyped(np.array([1, 2, 3]), \"not an array\")\n assert isinstance_typed(np.array([1.0, 2.0, 3.0]))\n\n\[email protected]_numpy\ndef test_constructors():\n from pybind11_tests.array import default_constructors, converting_constructors\n\n defaults = default_constructors()\n for a in defaults.values():\n assert a.size == 0\n assert defaults[\"array\"].dtype == np.array([]).dtype\n assert defaults[\"array_t<int32>\"].dtype == np.int32\n assert defaults[\"array_t<double>\"].dtype == np.float64\n\n results = converting_constructors([1, 2, 3])\n for a in results.values():\n np.testing.assert_array_equal(a, [1, 2, 3])\n assert results[\"array\"].dtype == np.int_\n assert results[\"array_t<int32>\"].dtype == np.int32\n assert results[\"array_t<double>\"].dtype == np.float64\n" ]
[ [ "numpy.random.random", "numpy.all", "numpy.testing.assert_array_equal", "numpy.uint64", "numpy.array" ] ]
LukeHartmanTmlt/private-pgm
[ "8d57504ae076ae1cb54f7a1c1f2a0815a696ae77" ]
[ "mechanisms/adaptive_grid.py" ]
[ "import numpy as np\nimport pandas as pd\nimport json\nfrom mbi import FactoredInference, Factor, Dataset, Domain\nfrom scipy import sparse\nfrom scipy.special import logsumexp\nimport itertools\nimport networkx as nx\nfrom disjoint_set import DisjointSet\nfrom cdp2adp import cdp_rho\nimport argparse\n\n\ndef powerset(iterable):\n \"\"\"Returns powerset of set consisting of elements in ``iterable``.\n Args:\n iterable (iterable): Set that powerset will be taken over\n\n Returns:\n iterator: the powerset\n\n Example:\n >>> pset = powerset(['a','b'])\n >>> list(pset)\n [('a',), ('b',), ('a', 'b')]\n \"\"\"\n s = list(iterable)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(1, len(s) + 1)\n )\n\n\ndef downward_closure(cliques):\n \"\"\"Returns the 'downward closure' of a list of cliques. The 'downward closure'\n is the union of powersets of each individual clique in a list. Elements within\n each clique are sorted, but the list of cliques is not.\n\n Args:\n cliques ([iterable]): List of cliques\n\n Returns:\n list: the downward closure of the set of variables in cliques\n\n Example:\n >>> downward_closure([[1,2],[2,3]])\n [(2,), (1,), (3,), (1, 2), (2, 3)]\n \"\"\"\n ans = set()\n for proj in cliques:\n ans.update(powerset(proj))\n return list(sorted(ans, key=len))\n\n\ndef get_permutation_matrix(cl1, cl2, domain):\n # permutation matrix that maps datavector of cl1 factor to datavector of cl2 factor\n\n \"\"\"Using the vector-of-counts representation of a database detailed in\n [Li 2012], we create a permutation matrix which maps the database with\n attributes in order cl1 to database with attributes in order cl2. Note that\n cl1 and cl2 contain the same elements, just in different order.\n\n Example of Concept:\n Let us define two example databases:\n\n Database A\n id a b c\n 1 0 1 0\n 2 0 0 0\n\n Database B\n id b c a\n 1 1 0 0\n 2 0 0 0\n\n We know that A = B since only the ordering of the attributes is changed.\n\n Let #(.) operation return the number of elements in a database\n which satisfy the condition within the parenthesis and let vec(.)\n be the vector-of-counts operation. Thus:\n\n vec(A) = [#(a=0,b=0,c=0),#(a=0,b=0,c=1),#(a=0,b=1,c=0),...,#(a=1,b=1,c=1)]\n = [1,0,1,0,0,0,0,0]\n vec(B) = [#(b=0,a=0,c=0),#(b=0,a=0,c=1),#(b=0,a=1,c=0),...,#(b=1,a=1,c=1)]\n = [1,0,0,0,1,0,0,0]\n\n Observe that vec(A) and vec(B) have the same values, but are just\n rearranged. 
Then, for any two equivalent databases A and B, the\n permutation matrix P is an 8x8 matrix such that:\n\n P @ vec(A) = vec(B).T\n\n For two identical database A and B.\n\n Args:\n cl1 (iterable): Input clique that permutation matrix maps from.\n cl2 (iterable): Target clique that permutation matrix maps to.\n domain (mbi.Domain): A mbi Domain object which holds the shape and names\n of each variable in the domain.\n\n Returns:\n scipy.sparse.csr_matrix: Sparse permutation matrix.\n\n Example:\n >>> domain = Domain(attrs=[1,2],shape=[2,2])\n >>> get_permutation_matrix([1,2],[2,1], domain).todense()\n matrix([[1., 0., 0., 0.],\n [0., 0., 1., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.]])\n \"\"\"\n assert set(cl1) == set(cl2)\n n = domain.size(cl1)\n fac = Factor(domain.project(cl1), np.arange(n))\n new = fac.transpose(cl2)\n data = np.ones(n)\n row_ind = fac.datavector()\n col_ind = new.datavector()\n return sparse.csr_matrix((data, (row_ind, col_ind)), shape=(n, n))\n\n\ndef get_aggregate(cl, matrices, domain):\n \"\"\"Returns additional measurement matrices by taking the Kronecker\n product between Identity and previous measurements.\n\n Args:\n cl (iterable): A clique marginal.\n matrices (dict): A dictionary of measurement matrices where the key is\n the clique and the value is the matrix.\n domain (mbi.Domain): A mbi Domain object which holds the shape and names\n of each variable in the domain.\n\n Returns:\n scipy.sparse.csr_matrix: Sparse matrix containing additional\n measurements.\n \"\"\"\n children = [r for r in matrices if set(r) < set(cl) and len(r) + 1 == len(cl)]\n ans = [sparse.csr_matrix((0, domain.size(cl)))]\n for c in children:\n coef = 1.0 / np.sqrt(len(children))\n a = tuple(set(cl) - set(c))\n cl2 = a + c\n Qc = matrices[c]\n P = get_permutation_matrix(cl, cl2, domain)\n T = np.ones(domain.size(a))\n Q = sparse.kron(T, Qc) @ P\n ans.append(coef * Q)\n return sparse.vstack(ans)\n\n\ndef get_identity(cl, post_plausibility, domain):\n \"\"\"Determine which cells in the cl marginal *could* have a count above\n threshold based on previous measurements.\n\n Args:\n cl (iterable): A clique marginal\n post_plausibility (dict): Dictionary of previously taken measurements.\n The key is the clique and value is a Factor object.\n domain (mbi.Domain): A mbi Domain object which holds the shape and names\n of each variable in the domain\n\n Returns:\n scipy.sparse.csr_matrix: Sparse matrix object where cells identified as\n probably containing counts above threshold have value 1.\n \"\"\"\n children = [\n r for r in post_plausibility if set(r) < set(cl) and len(r) + 1 == len(cl)\n ]\n plausibility = Factor.ones(domain.project(cl))\n for c in children:\n plausibility *= post_plausibility[c]\n\n row_ind = col_ind = np.nonzero(plausibility.datavector())[0]\n data = np.ones_like(row_ind)\n n = domain.size(cl)\n Q = sparse.csr_matrix((data, (row_ind, col_ind)), (n, n))\n return Q\n\n\ndef exponential_mechanism(q, eps, sensitivity, prng=np.random, monotonic=False):\n \"\"\"Performs the exponential mechanism. 
Returned results satisfy eps-DP.\n\n Args:\n q (ndarray): Weights for each item.\n eps (float): Privacy parameter.\n sensitivity (float): Sensitivity of the query.\n prng (np.random): Pseudo-random number generator to be used.\n monotonic (boolean): True if the addition of a new element to the\n selection set cannot cause the value of the query to increase, False\n otherwise.\n\n Returns:\n int: The index of the chosen item.\n \"\"\"\n if eps == np.inf:\n eps = np.finfo(np.float64).max\n coef = 1.0 if monotonic else 0.5\n scores = coef * eps / sensitivity * (q - q.max())\n probas = np.exp(scores - logsumexp(scores))\n return prng.choice(q.size, p=probas)\n\n\ndef select(data, model, rho, targets=[]):\n \"\"\"Selects additional measurements using a Minimum Spanning Tree-based method\n with the exponential mechanism being used to privately select candidate\n edges. Weights for each edge of the tree are based on the L1 norm between\n the marginal counts from the data and the marginal counts from the model.\n\n Args:\n data (mbi.Dataset): The sensitive dataset.\n model (mbi.GraphicalModel): The DP graphical model learned from the\n first round of measurements.\n rho (float): Remaining privacy budget, expressed as the zCDP parameter rho.\n targets (list, optional): Target columns specified by the user. Default\n is ``[]``.\n\n Returns:\n List of additional cliques (attribute tuples) selected for measurement.\n \"\"\"\n weights = {}\n candidates = list(itertools.combinations(data.domain.invert(targets), 2))\n for a, b in candidates:\n xhat = model.project([a, b] + targets).datavector()\n x = data.project([a, b] + targets).datavector()\n weights[a, b] = np.linalg.norm(x - xhat, 1)\n\n T = nx.Graph()\n T.add_nodes_from(data.domain.attrs)\n ds = DisjointSet()\n\n r = len(data.domain) - len(targets)\n epsilon = np.sqrt(8 * rho / (r - 1))\n for i in range(r - 1):\n candidates = [e for e in candidates if not ds.connected(*e)]\n wgts = np.array([weights[e] for e in candidates])\n idx = exponential_mechanism(wgts, epsilon, sensitivity=1.0)\n e = candidates[idx]\n T.add_edge(*e)\n ds.union(*e)\n\n return [e + tuple(targets) for e in T.edges]\n\n\ndef adagrid(data, epsilon, delta, threshold, targets=[], split_strategy=None, **mbi_args):\n \"\"\"Implements the Adagrid mechanism used in Sprint 3 of the NIST 2021\n Competition by Team Minutemen.\n\n Args:\n data (mbi.Dataset): The sensitive dataset.\n epsilon (float): Privacy parameter.\n delta (float): Delta parameter in approximate DP. Set to ``0`` if pure\n DP is required.\n threshold (float): Threshold for deciding which cells are\n likely to have non-zero counts.\n targets (iterable, optional): List of target columns. Default is\n ``[]``.\n iters (int): Number of iterations for the Mirror Descent algorithm to run\n (passed through ``mbi_args``).\n split_strategy ([floats]): List of floats, each giving the\n fraction of the zCDP budget allocated to each step of the algorithm.\n mbi_args (kwargs): Args to pass to mbi.FactoredInference. 
Please refer\n to the comments within this class to determine which parameters to pass.\n\n Returns:\n mbi.Dataset: Dataset object holding synthetic dataset satisfying\n (epsilon, delta) DP\n \"\"\"\n rho = cdp_rho(epsilon, delta)\n if not split_strategy:\n rho_step_1 = rho_step_2 = rho_step_3 = rho / 3\n else:\n assert len(split_strategy) == 3\n frac_1, frac_2, frac_3 = np.array(split_strategy) / sum(split_strategy)\n rho_step_1 = rho*frac_1\n rho_step_2 = rho*frac_2\n rho_step_3 = rho*frac_3\n\n domain = data.domain\n measurements = []\n post_plausibility = {}\n matrices = {}\n\n step1_outer = [(a,) + tuple(targets) for a in domain if a not in targets]\n step1_all = downward_closure(step1_outer)\n step1_sigma = np.sqrt(0.5 / rho_step_1) * np.sqrt(len(step1_all))\n\n # Step 1: Measure all 1-way marginals involving target(s)\n for k in range(1, len(targets) + 2):\n split = [cl for cl in step1_all if len(cl) == k]\n print()\n for cl in split:\n I = sparse.eye(domain.size(cl))\n Q1 = get_identity(\n cl, post_plausibility, domain\n ) # get fine-granularity measurements\n Q2 = get_aggregate(cl, matrices, domain) @ (\n I - Q1\n ) # get remaining aggregate measurements\n Q1 = Q1[Q1.getnnz(1) > 0] # remove all-zero rows\n Q = sparse.vstack([Q1, Q2])\n Q.T = sparse.csr_matrix(Q.T) # a trick to improve efficiency of Private-PGM\n # Q has sensitivity 1 by construction\n print(\n \"Measuring %s, L2 sensitivity %.6f\"\n % (cl, np.sqrt(Q.power(2).sum(axis=0).max()))\n )\n #########################################\n ### This code uses the sensitive data ###\n #########################################\n mu = data.project(cl).datavector()\n y = Q @ mu + np.random.normal(loc=0, scale=step1_sigma, size=Q.shape[0])\n #########################################\n est = Q1.T @ y[: Q1.shape[0]]\n\n post_plausibility[cl] = Factor(\n domain.project(cl), est >= step1_sigma * threshold\n )\n matrices[cl] = Q\n measurements.append((Q, y, 1.0, cl))\n\n engine = FactoredInference(domain, log=False, **mbi_args)\n engine.estimate(measurements)\n\n # Step 2: select more marginals using an MST-style approach\n step2_queries = select(data, engine.model, rho_step_2, targets)\n\n print()\n # step 3: measure those marginals\n step3_sigma = np.sqrt(len(step2_queries)) * np.sqrt(0.5 / rho_step_3)\n for cl in step2_queries:\n I = sparse.eye(domain.size(cl))\n Q1 = get_identity(\n cl, post_plausibility, domain\n ) # get fine-granularity measurements\n Q2 = get_aggregate(cl, matrices, domain) @ (\n I - Q1\n ) # get remaining aggregate measurements\n Q1 = Q1[Q1.getnnz(1) > 0] # remove all-zero rows\n Q = sparse.vstack([Q1, Q2])\n Q.T = sparse.csr_matrix(Q.T) # a trick to improve efficiency of Private-PGM\n # Q has sensitivity 1 by construction\n print(\n \"Measuring %s, L2 sensitivity %.6f\"\n % (cl, np.sqrt(Q.power(2).sum(axis=0).max()))\n )\n #########################################\n ### This code uses the sensitive data ###\n #########################################\n mu = data.project(cl).datavector()\n y = Q @ mu + np.random.normal(loc=0, scale=step3_sigma, size=Q.shape[0])\n #########################################\n\n measurements.append((Q, y, 1.0, cl))\n\n print()\n print(\"Post-processing with Private-PGM, will take some time...\")\n model = engine.estimate(measurements)\n return model.synthetic_data()\n\n\ndef default_params():\n \"\"\"\n Return default parameters to run this program\n\n :returns: a dictionary of default parameter settings for each command line argument\n \"\"\"\n params = {}\n params['dataset'] = 
'datasets/adult.zip'\n params['domain'] = 'datasets/adult-domain.json'\n params['epsilon'] = 1.0\n params['delta'] = 1e-10\n params['targets'] = []\n params['pgm_iters'] = 2500\n params['warm_start'] = True\n params['metric'] = 'L2'\n params['threshold'] = 5.0\n params['split_strategy'] = [0.1, 0.1, 0.8]\n params['save'] = 'out.csv'\n\n return params\n\nif __name__ == \"__main__\":\n\n description = 'A generalization of the Adaptive Grid Mechanism that won 2nd place in the 2020 NIST temporal map challenge'\n formatter = argparse.ArgumentDefaultsHelpFormatter\n parser = argparse.ArgumentParser(description=description, formatter_class=formatter)\n parser.add_argument('--dataset', help='dataset to use')\n parser.add_argument('--domain', help='domain to use')\n parser.add_argument('--epsilon', type=float, help='privacy parameter (epsilon)')\n parser.add_argument('--delta', type=float, help='privacy parameter (delta)')\n parser.add_argument('--targets', type=str, nargs='+', help='target columns to preserve')\n parser.add_argument('--pgm_iters', type=int, help='number of Mirror Descent (PGM) iterations')\n parser.add_argument('--warm_start', type=lambda v: str(v).lower() in ('1', 'true', 'yes'), help='warm start PGM')  # plain type=bool would treat any non-empty string as True\n parser.add_argument('--metric', choices=['L1','L2'], help='loss function metric to use')\n parser.add_argument('--threshold', type=float, help='adagrid threshold parameter')\n parser.add_argument('--split_strategy', type=float, nargs='+', help='budget split for 3 steps')\n parser.add_argument('--save', type=str, help='path to save synthetic data')\n\n parser.set_defaults(**default_params())\n args = parser.parse_args()\n\n df = pd.read_csv(args.dataset)\n domain = Domain.fromdict(json.load(open(args.domain, \"r\")))\n data = Dataset(df, domain)\n mbi_args = {\"iters\": args.pgm_iters, \"warm_start\": args.warm_start, \"metric\": args.metric}\n synth = adagrid(\n data,\n args.epsilon,\n args.delta,\n args.threshold,\n split_strategy=args.split_strategy,\n targets=args.targets,\n **mbi_args\n )\n\n synth.df.to_csv(args.save, index=False)\n" ]
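The vector-of-counts permutation described in the get_permutation_matrix docstring above can be sanity-checked without mbi at all. The following NumPy-only sketch is illustrative only and is not part of the dataset record or of the original source file; it reproduces the docstring's two-record example over three binary attributes and verifies that reordering attributes amounts to a fixed permutation of the flattened datavector.

import numpy as np

# Contingency table over binary attributes in order (a, b, c), holding the two
# records from the docstring example: (a=0, b=1, c=0) and (a=0, b=0, c=0).
counts_abc = np.zeros((2, 2, 2))
counts_abc[0, 1, 0] += 1
counts_abc[0, 0, 0] += 1

vec_A = counts_abc.reshape(-1)                     # datavector in (a, b, c) order
vec_B = counts_abc.transpose(1, 2, 0).reshape(-1)  # same table in (b, c, a) order

print(vec_A)  # [1. 0. 1. 0. 0. 0. 0. 0.]
print(vec_B)  # [1. 0. 0. 0. 1. 0. 0. 0.]

# Build the 8x8 permutation matrix P with P @ vec_A == vec_B by tracking where
# each flat (a, b, c) index lands under the transpose; get_permutation_matrix
# constructs the same mapping sparsely via Factor.datavector().
src = np.arange(8).reshape(2, 2, 2).transpose(1, 2, 0).reshape(-1)
P = np.zeros((8, 8))
P[np.arange(8), src] = 1.0
assert np.array_equal(P @ vec_A, vec_B)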
[ [ "pandas.read_csv", "numpy.ones_like", "numpy.sqrt", "numpy.arange", "numpy.linalg.norm", "scipy.sparse.csr_matrix", "numpy.ones", "numpy.finfo", "numpy.random.normal", "scipy.sparse.vstack", "scipy.sparse.kron", "numpy.array", "scipy.special.logsumexp" ] ]