Columns: repo_name (string, length 6 to 130), hexsha (list), file_path (list), code (list), apis (list).
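Each record below pairs a repository with one or more files sampled from it; hexsha, file_path, code and apis are parallel lists with one entry per file, and apis holds the library calls detected in that file's code. As a rough illustration only (not part of the dataset), the following sketch assumes the records are stored as JSON Lines under exactly these field names; the file name rows.jsonl is hypothetical.

import json

# Minimal sketch: assumes one JSON object per line with the fields
# repo_name, hexsha, file_path, code and apis listed above.
with open("rows.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # hexsha, file_path, code and apis are parallel lists, one entry per file.
        for sha, path, source, api_calls in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            print(f"{record['repo_name']} @ {sha[:8]} :: {path}")
            print(f"  {len(source.splitlines())} lines, APIs: {', '.join(api_calls)}")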
bstee615/Devign
[ "721ee636234a7ee4d71b5696501061a446af1e7b" ]
[ "main.py" ]
[ "import argparse\nimport glob\nimport json\nimport os\nimport pickle\nimport random\nimport sys\n\nimport numpy as np\nimport sklearn.model_selection\nimport torch\nfrom torch.nn import BCELoss\nfrom torch.optim import Adam\n\nfrom data_loader.dataset import DataSet, SavedDataset\nfrom modules.model import DevignModel, GGNNSum\nfrom run_model import run_one\nfrom trainer import train\nfrom utils import tally_param\nimport logging\n# from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n\n# logger = logging.getLogger(__name__)\n\ndef debug(*msg, sep=' '):\n print(sep.join((str(m) for m in msg)))\n\ndef main(raw_args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_type', type=str, help='Type of the model (devign/ggnn)',\n choices=['devign', 'ggnn'], default='devign')\n parser.add_argument('--input_dir', type=str, required=True, help='Input Directory of the parser')\n # parser.add_argument('--preprocessed_bin', type=str, help='Path to preprocessed data file')\n parser.add_argument('--model_dir', type=str, default=None, help='(DEPRECATED) Directory to store the model')\n parser.add_argument('--node_tag', type=str, help='Name of the node feature.', default='node_features')\n parser.add_argument('--graph_tag', type=str, help='Name of the graph feature.', default='graph')\n parser.add_argument('--label_tag', type=str, help='Name of the label feature.', default='targets')\n\n parser.add_argument('--feature_size', type=int, help='Size of feature vector for each node', default=169)\n parser.add_argument('--graph_embed_size', type=int, help='Size of the Graph Embedding', default=200)\n parser.add_argument('--num_steps', type=int, help='Number of steps in GGNN', default=6)\n parser.add_argument('--batch_size', type=int, help='Batch Size for training', default=128)\n parser.add_argument('--patience', type=int, help='Patience for early stopping', default=50)\n parser.add_argument('--seed', default=1000, type=int)\n parser.add_argument('--preprocess_only', action='store_true')\n parser.add_argument('--n_folds', default=5, type=int)\n parser.add_argument('--ray', action='store_true')\n parser.add_argument('--test', action='store_true')\n parser.add_argument('--save_after', action='store_true')\n args = parser.parse_args(raw_args)\n\n print(f'{__name__} args: {args}')\n if args.test:\n print(\"Quitting because it's just a test.\")\n return\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n n_folds = args.n_folds\n\n model_dir = os.path.join(args.input_dir, 'models')\n if not os.path.exists(model_dir):\n os.makedirs(model_dir, exist_ok=True)\n input_dir = os.path.join(args.input_dir, 'ggnn_input')\n if not os.path.exists(input_dir):\n os.makedirs(input_dir, exist_ok=True)\n logfile_name = f'devign-{args.model_type}'\n if args.preprocess_only:\n logfile_name += '-preprocess_only'\n logfile_name += '.log'\n logfile_path = os.path.join(model_dir, logfile_name)\n if os.path.exists(logfile_path):\n os.unlink(logfile_path)\n # logger.addHandler(logging.FileHandler(logfile_path))\n\n after_ggnn_dir = os.path.join(args.input_dir, 'after_ggnn')\n if not os.path.exists(after_ggnn_dir):\n os.makedirs(after_ggnn_dir, exist_ok=True)\n\n if args.model_dir is not None:\n debug(f'--model_dir set to {args.model_dir} but is DEPRECATED. 
Will not be used.')\n\n if args.feature_size > args.graph_embed_size:\n print('Graph Embed dimension should be at least equal to the feature dimension.\\n'\n 'Setting graph embedding size to feature size', file=sys.stderr)\n args.graph_embed_size = args.feature_size\n\n # processed_data_path = glob.glob(os.path.join(input_dir, '*processed.bin'))[0]\n processed_data_path = os.path.join(input_dir, 'processed.bin')\n\n if os.path.exists(processed_data_path):\n debug('Reading already processed data from %s!' % processed_data_path)\n saved_dataset = pickle.load(open(processed_data_path, 'rb'))\n else:\n saved_dataset = SavedDataset.read_dataset(args.node_tag, args.graph_tag, args.label_tag, os.path.join(input_dir, 'GGNNinput.pkl'), os.path.join(input_dir, 'augmented_GGNNinput.pkl'))\n with open(processed_data_path, 'wb') as file:\n pickle.dump(saved_dataset, file)\n\n if args.preprocess_only:\n debug('Done preprocessing, exiting.')\n return\n\n print(f\"CUDA: {torch.cuda.is_available()}, {torch.version.cuda}\")\n assert torch.cuda.is_available()\n\n output_file_name = os.path.join(model_dir, f'devign-{args.model_type}-results.tsv')\n\n all_splits = []\n with open(output_file_name, 'w') as output_file:\n for i in range(0, n_folds):\n print(f'Fold: {i}')\n roll = len(saved_dataset.examples) // n_folds * i\n splits = (int(.7*len(saved_dataset.examples)), int(.8*len(saved_dataset.examples)))\n all_splits.append({\n \"idx\": i,\n \"roll\": roll,\n \"splits\": splits,\n })\n dataset = saved_dataset.to_dataset(roll, splits, args.batch_size)\n with open(os.path.join(model_dir, f'splits-{args.model_type}.json'), 'w') as f:\n json.dump(all_splits, f)\n print(f'Feature size: {dataset.feature_size}')\n assert args.feature_size == dataset.feature_size, \\\n 'Dataset contains different feature vector than argument feature size. ' \\\n 'Either change the feature vector size in argument, or provide different dataset.'\n model_filename = os.path.join(model_dir, f'{args.model_type}-model-{i}.pth')\n if args.model_type == 'ggnn':\n model = GGNNSum(input_dim=dataset.feature_size, output_dim=args.graph_embed_size,\n num_steps=args.num_steps, max_edge_types=dataset.max_edge_type)\n else:\n model = DevignModel(input_dim=dataset.feature_size, output_dim=args.graph_embed_size,\n num_steps=args.num_steps, max_edge_types=dataset.max_edge_type)\n debug('Total Parameters : %d' % tally_param(model))\n debug('#' * 100)\n model.cuda()\n loss_function = BCELoss(reduction='sum')\n optim = Adam(model.parameters(), lr=0.0001, weight_decay=0.001)\n train(model=model, dataset=dataset, max_steps=1000000, log_every=None, dev_every=128,\n loss_function=loss_function, optimizer=optim,\n save_path=model_filename, output_file=output_file, max_patience=args.patience, ray=args.ray)\n\n if args.save_after:\n run_one(after_ggnn_dir, model, loss_function, 'test', dataset.initialize_test_batch(), dataset.get_next_test_batch, i, logger_fn=print)\n run_one(after_ggnn_dir, model, loss_function, 'valid', dataset.initialize_valid_batch(), dataset.get_next_valid_batch, i, logger_fn=print)\n run_one(after_ggnn_dir, model, loss_function, 'train', dataset.initialize_train_batch(), dataset.get_next_train_batch, i, logger_fn=print)\n\n del model\n del dataset\n del optim\n del loss_function\n\n\nif __name__ == \"__main__\":\n # logging.basicConfig(\n # format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\n # datefmt='%Y-%m-%d:%H:%M:%S',\n # level=logging.INFO,\n # )\n main()\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.cuda.is_available", "torch.nn.BCELoss" ] ]
thatgeeman/pybx
[ "d1085d2942a3c5f5d33fcfd57bfd69bf51ce09ea" ]
[ "tests/test_vis.py" ]
[ "import json\nimport unittest\nimport warnings\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom pybx.basics import *\nfrom pybx.vis import VisBx\n\nnp.random.seed(1)\n\nparams = {\n \"data_dir\": './data',\n \"annots_file\": 'annots_iou.json',\n \"annots_iou_file\": './data/annots_iou.json',\n \"annots_rand_file\": './data/annots_rand.json',\n \"annots_l\": [[50., 70., 120., 100., 'rand1'], [150., 200., 250., 240., 'rand2']],\n \"annots_1d\": np.random.randint(low=1, high=10, size=4),\n \"annots_nd\": np.random.randint(low=1, high=10, size=(2, 4)),\n \"annots_json\": [{'label': '', 'x_max': 0, 'x_min': 0, 'y_max': 0, 'y_min': 0}],\n \"feature_sz\": (2, 2),\n \"image_sz\": (10, 10, 3),\n \"random_im_sz\": (10, 10, 3),\n \"image_arr\": np.random.randint(size=(10, 10, 3), low=0, high=255),\n \"image_arr_float\": np.random.randn(10, 10, 3),\n}\n\n\nclass VisTestCase(unittest.TestCase):\n def __init__(self, args):\n super(VisTestCase, self).__init__(args)\n # use image paths to load image and anns\n self.v1 = VisBx(image_sz=params[\"image_sz\"], feature_sz=params[\"feature_sz\"],\n logits=True, pth=params[\"data_dir\"], ann_fn=params[\"annots_file\"], sample=True, load_ann=True)\n\n # use image paths to load image only dont load anns\n self.v2 = VisBx(image_sz=params[\"image_sz\"], pth=params[\"data_dir\"], sample=True, load_ann=False)\n\n # use image array directly with annots\n self.v3 = VisBx(image_arr=params[\"image_arr\"], annots=params[\"annots_l\"], feature_sz=params[\"feature_sz\"])\n\n # use image array directly with 1D annots\n self.v4 = VisBx(image_arr=params[\"image_arr\"], annots=params[\"annots_1d\"], feature_sz=params[\"feature_sz\"])\n\n # use image array directly with ND annots\n self.v5 = VisBx(image_arr=params[\"image_arr\"], annots=params[\"annots_nd\"], feature_sz=params[\"feature_sz\"])\n\n # use random image array\n self.v6 = VisBx(image_sz=params[\"image_sz\"])\n\n # use logits data with image array\n self.v7 = VisBx(image_arr=params[\"image_arr\"], annots=params[\"annots_l\"], feature_sz=params[\"feature_sz\"],\n logits=np.random.randn(*params[\"feature_sz\"]))\n\n # use logits data with image array but single anns\n self.v8 = VisBx(image_arr=params[\"image_arr\"], annots=params[\"annots_l\"][0], feature_sz=params[\"feature_sz\"],\n logits=np.random.randn(*params[\"feature_sz\"]))\n\n # use annots json\n self.v9 = VisBx(image_arr=params[\"image_arr\"], annots=params[\"annots_json\"], feature_sz=params[\"feature_sz\"])\n\n self.vs = [self.v1, self.v2, self.v3, self.v4,\n self.v5, self.v6, self.v7, self.v8, self.v9]\n\n def test_vis_bx(self):\n with open(params[\"annots_rand_file\"]) as f:\n annots = json.load(f)\n for v in self.vs:\n self.assertTrue(v.show(annots))\n plt.close()\n\n def test_vis_jsonbx(self):\n with open(params[\"annots_rand_file\"]) as f:\n annots = json.load(f)\n annots = mbx(annots)\n for v in self.vs:\n self.assertTrue(v.show(annots))\n plt.close()\n\n def test_vis_jsonbx_single(self):\n annots = params[\"annots_json\"]\n for v in self.vs:\n self.assertTrue(v.show(annots))\n plt.close()\n\n def test_vis_listbx_single(self):\n annots = bbx(params[\"annots_l\"][0])\n for v in self.vs:\n self.assertTrue(v.show(annots))\n plt.close()\n\n def test_vis_listbx(self):\n annots = mbx(params[\"annots_l\"])\n for v in self.vs:\n self.assertTrue(v.show(annots))\n plt.close()\n\n def test_vis_bbx_list(self):\n b = bbx(params[\"annots_l\"][0])\n self.assertIsInstance(b, BaseBx)\n for v in self.vs:\n self.assertTrue(v.show(b))\n plt.close()\n\n 
def test_vis_bbx_json(self):\n with open(params[\"annots_rand_file\"]) as f:\n annots = json.load(f)\n b = bbx(annots[0])\n self.assertIsInstance(b, BaseBx)\n for v in self.vs:\n self.assertTrue(v.show(b))\n plt.close()\n\n \"\"\"\n # float arrays not accepted since pybx 0.1.3\n def test_float_array(self):\n im = params[\"image_arr_float\"]\n ann = params[\"annots_json\"]\n sz = params[\"image_sz\"]\n self.assertRaises(TypeError, VisBx, image_arr=im, image_sz=sz, annots=ann)\n \"\"\"\n\n\nif __name__ == '__main__':\n with warnings.catch_warnings:\n warnings.filterwarnings('ignore')\n unittest.main()\n" ]
[ [ "numpy.random.seed", "numpy.random.randn", "numpy.random.randint", "matplotlib.pyplot.close" ] ]
nicolasmelo1/marketing-performance-report
[ "796534beeb729a38a142ae4a099a378c7eae6f99" ]
[ "pax/performance.py" ]
[ "from pax.appsflyer import appsflyerData\r\nfrom pax.facebook import facebookdata\r\nfrom pax.google import googlereports\r\nfrom pax.database import baseDatabase\r\nfrom utils.paths import PATH_SIGLAS_PRACAS\r\nfrom pax.twitter import twitterdata\r\nimport unidecode\r\nimport pandas\r\nimport utils.time\r\npandas.options.mode.chained_assignment = None\r\n\r\n\r\ndef cleanNewAppData(performance):\r\n\r\n performance['os_name'][performance['campaign'].str.contains('iOS', na=False)] = 'ios'\r\n performance['os_name'][performance['campaign'].str.contains('IOS', na=False)] = 'ios'\r\n\r\n performance['campaign'][performance['campaign'] == 'GDN_And_MKT_BH-CPI924382686'] = 'GDN_And_MKT_BH-CPI'\r\n performance['campaign'][performance['campaign'] == 'GDN_And_MKT_VIX-CPC931642203'] = 'GDN_And_MKT_VIX-CPC'\r\n performance['campaign'][performance['campaign'] == 'GDN_And_MKT_VIX-CPI930974807'] = 'GDN_And_MKT_VIX-CPI'\r\n performance['campaign'][performance['campaign'] == 'SMS_All_Ops_CWB/market://details?id=com.app99.pax'] = 'SMS_All_Ops_CWB'\r\n performance['campaign'][performance['campaign'] == 'Spotify_All_Brand_BH-overlay'] = 'Spotify_All_Brand_BH'\r\n performance['campaign'][performance['campaign'] == 'SRC_And_Conc-GYN_UA929042102'] = 'SRC_And_Conc-GYN_UA'\r\n\r\n performance['campaign'][performance['campaign'] == 'RG-CWB-IOS-AppInstal'] = 'RG-CWB-IOS-AppInstall'\r\n\r\n if 'region' in performance.columns:\r\n region = pandas.read_csv(PATH_SIGLAS_PRACAS, sep=';')\r\n region['pracas'] = region['pracas'].str.upper()\r\n listofregions = [tuple(x) for x in region.values]\r\n\r\n performance['region'][performance['region'].isnull()] = 'BR'\r\n for i in range(0, len(listofregions)):\r\n performance['region'][performance['campaign'].str.contains(listofregions[i][0], na=False)] = \\\r\n listofregions[i][1]\r\n\r\n performance['region'] = performance['region'].apply(lambda x: unidecode.unidecode(x))\r\n\r\n performance['week'] = performance['date'].dt.week\r\n\r\n return performance\r\n\r\n\r\ndef performanceNewAppData():\r\n performance = pandas.concat([\r\n appsflyerData(),\r\n twitterdata(),\r\n facebookdata(['act_1894184000615284', 'act_1691552937545059', 'act_967083766658650']),\r\n googlereports(['771-742-8350', '411-922-6657']),\r\n baseDatabase()\r\n ])\r\n performance = cleanNewAppData(performance)\r\n performance = performance[\r\n ['date', 'week', 'tool', 'midia', 'source', 'os_name', 'campaign', 'adgroup', 'creative',\r\n 'installs', 'first_trip', 'sign_ups', 'amount_spent', 'impressions', 'clicks', 'pax', 'trips', 'burn', 'gmv',\r\n 'takerate', 'pft_pax',\r\n 'pft_trips', 'pft_burn', 'pft_gmv', 'pft_takerate', 'region']]\r\n return performance\r\n\r\n\r\n" ]
[ [ "pandas.read_csv" ] ]
yuichikano/Fashion-Image-Retrieval-System
[ "5d712a4e400716e84337defe08f51c2165d44ade" ]
[ "DML/netlib.py" ]
[ "# Copyright 2019 Karsten Roth and Biagio Brattoli\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n# Normal ver\n\n############################ LIBRARIES ######################################\nimport torch, os, numpy as np\n\nimport torch.nn as nn\nimport pretrainedmodels as ptm\n\nimport pretrainedmodels.utils as utils\nimport torchvision.models as models\n\nimport googlenet\n\n\n\n\"\"\"=============================================================\"\"\"\ndef initialize_weights(model):\n \"\"\"\n Function to initialize network weights.\n NOTE: NOT USED IN MAIN SCRIPT.\n\n Args:\n model: PyTorch Network\n Returns:\n Nothing!\n \"\"\"\n for idx,module in enumerate(model.modules()):\n if isinstance(module, nn.Conv2d):\n nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(module, nn.BatchNorm2d):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n elif isinstance(module, nn.Linear):\n module.weight.data.normal_(0,0.01)\n module.bias.data.zero_()\n\n\n\n\"\"\"==================================================================================================================================\"\"\"\n### ATTRIBUTE CHANGE HELPER\ndef rename_attr(model, attr, name):\n \"\"\"\n Rename attribute in a class. Simply helper function.\n\n Args:\n model: General Class for which attributes should be renamed.\n attr: str, Name of target attribute.\n name: str, New attribute name.\n \"\"\"\n setattr(model, name, getattr(model, attr))\n delattr(model, attr)\n\n\n\"\"\"==================================================================================================================================\"\"\"\n### NETWORK SELECTION FUNCTION\ndef networkselect(opt):\n \"\"\"\n Selection function for available networks.\n\n Args:\n opt: argparse.Namespace, contains all training-specific training parameters.\n Returns:\n Network of choice\n \"\"\"\n if opt.arch == 'googlenet':\n network = GoogLeNet(opt)\n elif opt.arch == 'resnet50':\n network = ResNet50(opt)\n else:\n raise Exception('Network {} not available!'.format(opt.arch))\n return network\n\n\n\n\n\"\"\"==================================================================================================================================\"\"\"\nclass GoogLeNet(nn.Module):\n \"\"\"\n Container for GoogLeNet s.t. 
it can be used for metric learning.\n The Network has been broken down to allow for higher modularity, if one wishes\n to target specific layers/blocks directly.\n \"\"\"\n def __init__(self, opt):\n \"\"\"\n Args:\n opt: argparse.Namespace, contains all training-specific parameters.\n Returns:\n Nothing!\n \"\"\"\n super(GoogLeNet, self).__init__()\n\n self.pars = opt\n\n self.model = googlenet.googlenet(num_classes=1000, pretrained='imagenet' if not opt.not_pretrained else False)\n\n for module in filter(lambda m: type(m) == nn.BatchNorm2d, self.model.modules()):\n module.eval()\n module.train = lambda _: None\n\n rename_attr(self.model, 'fc', 'last_linear')\n\n self.layer_blocks = nn.ModuleList([self.model.inception3a, self.model.inception3b, self.model.maxpool3,\n self.model.inception4a, self.model.inception4b, self.model.inception4c,\n self.model.inception4d, self.model.inception4e, self.model.maxpool4,\n self.model.inception5a, self.model.inception5b, self.model.avgpool])\n\n self.model.last_linear = torch.nn.Linear(self.model.last_linear.in_features, opt.embed_dim)\n\n\n def forward(self, x):\n ### Initial Conv Layers\n x = self.model.conv3(self.model.conv2(self.model.maxpool1(self.model.conv1(x))))\n x = self.model.maxpool2(x)\n\n ### Inception Blocks\n for layerblock in self.layer_blocks:\n x = layerblock(x)\n\n x = x.view(x.size(0), -1)\n x = self.model.dropout(x)\n\n mod_x = self.model.last_linear(x)\n\n #No Normalization is used if N-Pair Loss is the target criterion.\n return mod_x if self.pars.loss=='npair' else torch.nn.functional.normalize(mod_x, dim=-1)\n\n\n\n\"\"\"=============================================================\"\"\"\nclass ResNet50(nn.Module):\n \"\"\"\n Container for ResNet50 s.t. it can be used for metric learning.\n The Network has been broken down to allow for higher modularity, if one wishes\n to target specific layers/blocks directly.\n \"\"\"\n def __init__(self, opt, list_style=False, no_norm=False):\n super(ResNet50, self).__init__()\n\n self.pars = opt\n\n if not opt.not_pretrained:\n print('Getting pretrained weights...')\n self.model = ptm.__dict__['resnet50'](num_classes=1000, pretrained='imagenet')\n print('Done.')\n else:\n print('Not utilizing pretrained weights!')\n self.model = ptm.__dict__['resnet50'](num_classes=1000, pretrained=None)\n\n for module in filter(lambda m: type(m) == nn.BatchNorm2d, self.model.modules()):\n module.eval()\n module.train = lambda _: None\n\n self.model.last_linear = torch.nn.Linear(self.model.last_linear.in_features, opt.embed_dim)\n #self.layer_blocks = nn.ModuleList([self.model.layer1, self.model.layer2])\n self.layer_blocks = nn.ModuleList([self.model.layer1, self.model.layer2, self.model.layer3, self.model.layer4])\n\n def forward(self, x, is_init_cluster_generation=False):\n x = self.model.maxpool(self.model.relu(self.model.bn1(self.model.conv1(x))))\n\n for layerblock in self.layer_blocks:\n x = layerblock(x)\n\n x = self.model.avgpool(x)\n x = x.view(x.size(0),-1)\n\n mod_x = self.model.last_linear(x)\n #No Normalization is used if N-Pair Loss is the target criterion.\n return mod_x if self.pars.loss=='npair' else torch.nn.functional.normalize(mod_x, dim=-1)\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.normalize", "torch.nn.ModuleList", "torch.nn.init.constant_", "torch.nn.init.kaiming_normal_" ] ]
EthereumGeeks/dev
[ "daf2d0fb3418cac564461d03c1a9fed4fdec3589" ]
[ "packages/contracts/macroModel/macro_model.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Baseline vs Alternative V2 Copy\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1NNPdiKfO3950MuAGyIXTNrr4OMliINKb\n\n# Parameters and Initialization\n\"\"\"\n\nimport random\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport matplotlib.pyplot as plt\nimport scipy.stats\nfrom plotly.subplots import make_subplots\n\n#policy functions\nrate_issuance = 0.01\nrate_redemption = 0.01\nbase_rate_initial = 0\n\n#global variables\nperiod = 24*365\nmonth=24*30\nday=24\n\n#ether price\nprice_ether_initial = 1000\nprice_ether = [price_ether_initial]\nsd_ether=0.02\ndrift_ether = 0\n\n#LQTY price & airdrop\nprice_LQTY_initial = 1\nprice_LQTY = [price_LQTY_initial]\nsd_LQTY=0.005\ndrift_LQTY = 0.0035\n#reduced for now. otherwise the initial return too high\nquantity_LQTY_airdrop = 500\nsupply_LQTY=[0]\nLQTY_total_supply=100000000\n\n#PE ratio\nPE_ratio = 50\n\n#natural rate\nnatural_rate_initial = 0.2\nnatural_rate = [natural_rate_initial]\nsd_natural_rate=0.002\n\n#stability pool\ninitial_return=0.2\nreturn_stability=[initial_return]\nsd_return=0.001\nsd_stability=0.001\ndrift_stability=1.002\ntheta=0.001\n\n#liquidity pool & redemption pool\nsd_liquidity=0.001\nsd_redemption=0.001\ndrift_liquidity=1.0003\nredemption_star = 0.8\ndelta = -20\n\n#close troves\nsd_closetroves=0.5\n#sensitivity to LUSD price\nbeta = 0.2\n\n#open troves\ndistribution_parameter1_ether_quantity=10\ndistribution_parameter2_ether_quantity=500\ndistribution_parameter1_CR = 1.1\ndistribution_parameter2_CR = 0.1\ndistribution_parameter3_CR = 16\ndistribution_parameter1_inattention = 4\ndistribution_parameter2_inattention = 0.08\nsd_opentroves=0.5\nn_steady=0.5\ninitial_open=10\n\n#sensitivity to LUSD price & issuance fee\nalpha = 0.3\n\n#number of runs in simulation\nn_sim= 8640\n\n\"\"\"# Exogenous Factors\n\nEther Price\n\"\"\"\n\n#ether price\nfor i in range(1, period):\n random.seed(2019375+10000*i)\n shock_ether = random.normalvariate(0,sd_ether)\n price_ether.append(price_ether[i-1]*(1+shock_ether)*(1+drift_ether))\n\n\"\"\"Natural Rate\"\"\"\n\n#natural rate\nfor i in range(1, period):\n random.seed(201597+10*i)\n shock_natural = random.normalvariate(0,sd_natural_rate)\n natural_rate.append(natural_rate[i-1]*(1+shock_natural))\n\n\"\"\"LQTY Price - First Month\"\"\"\n\n#LQTY price\nfor i in range(1, month):\n random.seed(2+13*i)\n shock_LQTY = random.normalvariate(0,sd_LQTY) \n price_LQTY.append(price_LQTY[i-1]*(1+shock_LQTY)*(1+drift_LQTY))\n\n\"\"\"# Troves\n\nLiquidate Troves\n\"\"\"\n\ndef liquidate_troves(troves, index, data):\n troves['CR_current'] = troves['Ether_Price']*troves['Ether_Quantity']/troves['Supply']\n price_LUSD_previous = data.loc[index-1,'Price_LUSD']\n price_LQTY_previous = data.loc[index-1,'price_LQTY']\n stability_pool_previous = data.loc[index-1, 'stability']\n\n troves_liquidated = troves[troves.CR_current < 1.1]\n troves = troves[troves.CR_current >= 1.1]\n debt_liquidated = troves_liquidated['Supply'].sum()\n ether_liquidated = troves_liquidated['Ether_Quantity'].sum()\n n_liquidate = troves_liquidated.shape[0]\n troves = troves.reset_index(drop = True)\n\n liquidation_gain = ether_liquidated*price_ether_current - debt_liquidated*price_LUSD_previous\n airdrop_gain = price_LQTY_previous * quantity_LQTY_airdrop\n \n np.random.seed(2+index)\n shock_return = np.random.normal(0,sd_return)\n if index <= day:\n return_stability = 
initial_return*(1+shock_return)\n elif index<=month:\n #min function to rule out the large fluctuation caused by the large but temporary liquidation gain in a particular period\n return_stability = min(0.5, 365*(data.loc[index-day:index, 'liquidation_gain'].sum()+data.loc[index-day:index, 'airdrop_gain'].sum())/(price_LUSD_previous*stability_pool_previous))\n else:\n return_stability = (365/30)*(data.loc[index-month:index, 'liquidation_gain'].sum()+data.loc[index-month:index, 'airdrop_gain'].sum())/(price_LUSD_previous*stability_pool_previous)\n \n return[troves, return_stability, debt_liquidated, ether_liquidated, liquidation_gain, airdrop_gain, n_liquidate]\n\n\"\"\"Close Troves\"\"\"\n\ndef close_troves(troves, index2, price_LUSD_previous):\n np.random.seed(208+index2)\n shock_closetroves = np.random.normal(0,sd_closetroves)\n n_troves = troves.shape[0]\n\n if index2 <= 240:\n number_closetroves = np.random.uniform(0,1)\n elif price_LUSD_previous >=1:\n number_closetroves = max(0, n_steady * (1+shock_closetroves))\n else:\n number_closetroves = max(0, n_steady * (1+shock_closetroves)) + beta*(1-price_LUSD_previous)*n_troves\n \n number_closetroves = int(round(number_closetroves))\n \n random.seed(293+100*index2)\n drops = list(random.sample(range(len(troves)), number_closetroves))\n troves = troves.drop(drops)\n troves = troves.reset_index(drop=True)\n if len(troves) < number_closetroves:\n number_closetroves = -999\n\n return[troves, number_closetroves]\n\n\"\"\"Adjust Troves\"\"\"\n\ndef adjust_troves(troves, index):\n issuance_LUSD_adjust = 0\n random.seed(57984-3*index)\n ratio = random.uniform(0,1)\n for i in range(0, troves.shape[0]):\n random.seed(187*index + 3*i)\n working_trove = troves.iloc[i,:]\n p = random.uniform(0,1)\n check = (working_trove['CR_current']-working_trove['CR_initial'])/(working_trove['CR_initial']*working_trove['Rational_inattention'])\n\n #A part of the troves are adjusted by adjusting debt\n if p >= ratio:\n if check<-1:\n working_trove['Supply'] = working_trove['Ether_Price']*working_trove['Ether_Quantity']/working_trove['CR_initial']\n if check>2:\n supply_new = working_trove['Ether_Price']*working_trove['Ether_Quantity']/working_trove['CR_initial']\n issuance_LUSD_adjust = issuance_LUSD_adjust + rate_issuance * (supply_new - working_trove['Supply'])\n working_trove['Supply'] = supply_new\n #Another part of the troves are adjusted by adjusting collaterals\n if p < ratio and (check < -1 or check > 2):\n working_trove['Ether_Quantity'] = working_trove['CR_initial']*working_trove['Supply']/working_trove['Ether_Price']\n \n troves.loc[i] = working_trove\n return[troves, issuance_LUSD_adjust]\n\n\"\"\"Open Troves\"\"\"\n\ndef open_troves(troves, index1, price_LUSD_previous):\n random.seed(2019*index1) \n issuance_LUSD_open = 0\n shock_opentroves = random.normalvariate(0,sd_opentroves)\n n_troves = troves.shape[0]\n\n if index1<=0:\n number_opentroves = initial_open\n elif price_LUSD_previous <=1 + rate_issuance:\n number_opentroves = max(0, n_steady * (1+shock_opentroves))\n else:\n number_opentroves = max(0, n_steady * (1+shock_opentroves)) + alpha*(price_LUSD_previous-rate_issuance-1)*n_troves\n \n number_opentroves = int(round(float(number_opentroves)))\n\n for i in range(0, number_opentroves):\n price_ether_current = price_ether[index1]\n \n np.random.seed(2033 + index1 + i*i)\n CR_ratio = distribution_parameter1_CR + distribution_parameter2_CR * np.random.chisquare(df=distribution_parameter3_CR)\n \n np.random.seed(20 + 10 * i + index1)\n quantity_ether 
= np.random.gamma(distribution_parameter1_ether_quantity, scale=distribution_parameter2_ether_quantity)\n \n np.random.seed(209870- index1 + i*i)\n rational_inattention = np.random.gamma(distribution_parameter1_inattention, scale=distribution_parameter2_inattention)\n \n supply_trove = price_ether_current * quantity_ether / CR_ratio\n issuance_LUSD_open = issuance_LUSD_open + rate_issuance * supply_trove\n\n new_row = {\"Ether_Price\": price_ether_current, \"Ether_Quantity\": quantity_ether, \n \"CR_initial\": CR_ratio, \"Supply\": supply_trove, \n \"Rational_inattention\": rational_inattention, \"CR_current\": CR_ratio}\n troves = troves.append(new_row, ignore_index=True)\n\n return[troves, number_opentroves, issuance_LUSD_open]\n\n\"\"\"# LUSD Market\n\nStability Pool\n\"\"\"\n\ndef stability_update(stability_pool_previous, return_previous, index):\n np.random.seed(27+3*index)\n shock_stability = np.random.normal(0,sd_stability)\n natural_rate_current = natural_rate[index]\n if index <= month:\n stability_pool = stability_pool_previous* (drift_stability+shock_stability)* (1+ return_previous- natural_rate_current)**theta\n else:\n stability_pool = stability_pool_previous* (1+shock_stability)* (1+ return_previous- natural_rate_current)**theta\n return[stability_pool]\n\n\"\"\"LUSD Price, liquidity pool, and redemption\"\"\"\n\ndef price_stabilizer(troves, index, data, stability_pool, n_open):\n issuance_LUSD_stabilizer = 0\n redemption_fee = 0\n n_redempt = 0\n redempted = 0\n redemption_pool = 0 \n#Calculating Price\n supply = troves['Supply'].sum()\n np.random.seed(20*index)\n shock_liquidity = np.random.normal(0,sd_liquidity)\n liquidity_pool_previous = float(data['liquidity'][index-1])\n price_LUSD_previous = float(data['Price_LUSD'][index-1])\n price_LUSD_current= price_LUSD_previous*((supply-stability_pool)/(liquidity_pool_previous*(drift_liquidity+shock_liquidity)))**(1/delta)\n \n\n#Liquidity Pool\n liquidity_pool = supply-stability_pool\n\n#Stabilizer\n #Ceiling Arbitrageurs\n if price_LUSD_current > 1.1 + rate_issuance:\n #supply_current = sum(troves['Supply'])\n supply_wanted=stability_pool+liquidity_pool_previous*(drift_liquidity+shock_liquidity)*((1.1+rate_issuance)/price_LUSD_previous)**delta\n supply_trove = supply_wanted - supply\n\n CR_ratio = 1.1\n rational_inattention = 0.1\n quantity_ether = supply_trove * CR_ratio / price_ether_current\n issuance_LUSD_stabilizer = rate_issuance * supply_trove\n\n new_row = {\"Ether_Price\": price_ether_current, \"Ether_Quantity\": quantity_ether, \"CR_initial\": CR_ratio,\n \"Supply\": supply_trove, \"Rational_inattention\": rational_inattention, \"CR_current\": CR_ratio}\n troves = troves.append(new_row, ignore_index=True)\n price_LUSD_current = 1.1 + rate_issuance\n #missing in the previous version \n liquidity_pool = supply_wanted-stability_pool\n n_open=n_open+1\n \n\n #Floor Arbitrageurs\n if price_LUSD_current < 1 - rate_redemption:\n np.random.seed(30*index)\n shock_redemption = np.random.normal(0,sd_redemption)\n redemption_ratio = redemption_star * (1+shock_redemption)\n\n #supply_current = sum(troves['Supply'])\n supply_target=stability_pool+liquidity_pool_previous*(drift_liquidity+shock_liquidity)*((1-rate_redemption)/price_LUSD_previous)**delta\n supply_diff = supply - supply_target\n if supply_diff < redemption_ratio * liquidity_pool:\n redemption_pool=supply_diff\n #liquidity_pool = liquidity_pool - redemption_pool\n price_LUSD_current = 1 - rate_redemption\n else:\n redemption_pool=redemption_ratio * liquidity_pool\n 
#liquidity_pool = (1-redemption_ratio)*liquidity_pool\n price_LUSD_current= price_LUSD_previous * (liquidity_pool/(liquidity_pool_previous*(drift_liquidity+shock_liquidity)))**(1/delta)\n \n #Shutting down the riskiest troves\n troves = troves.sort_values(by='CR_current', ascending = True)\n quantity_working_trove = troves['Supply'][troves.index[0]]\n redempted = quantity_working_trove\n while redempted <= redemption_pool:\n troves = troves.drop(troves.index[0])\n quantity_working_trove = troves['Supply'][troves.index[0]]\n redempted = redempted + quantity_working_trove\n n_redempt = n_redempt + 1\n \n #Residuals\n redempted = redempted - quantity_working_trove\n residual = redemption_pool - redempted\n wk = troves.index[0]\n troves['Supply'][wk] = troves['Supply'][wk] - residual\n troves['Ether_Quantity'][wk] = troves['Ether_Quantity'][wk] - residual/price_ether_current\n troves['CR_current'][wk] = price_ether_current * troves['Ether_Quantity'][wk] / troves['Supply'][wk]\n\n #Redemption Fee\n redemption_fee = rate_redemption * redemption_pool\n \n\n troves = troves.reset_index(drop=True)\n return[price_LUSD_current, liquidity_pool, troves, issuance_LUSD_stabilizer, redemption_fee, n_redempt, redemption_pool, n_open]\n\n\"\"\"# LQTY Market\"\"\"\n\n\n\ndef LQTY_market(index, data):\n quantity_LQTY = (100000000/3)*(1-0.5**(index/period))\n np.random.seed(2+3*index)\n if index <= month: \n price_LQTY_current = price_LQTY[index-1]\n annualized_earning = (index/month)**0.5*np.random.normal(200000000,500000)\n else:\n revenue_issuance = data.loc[index-month:index, 'issuance_fee'].sum()\n revenue_redemption = data.loc[index-month:index, 'redemption_fee'].sum()\n annualized_earning = 365*(revenue_issuance+revenue_redemption)/30\n #discountin factor to factor in the risk in early days\n discount=index/period\n price_LQTY_current = discount*PE_ratio*annualized_earning/LQTY_total_supply\n \n MC_LQTY_current = price_LQTY_current * quantity_LQTY\n return[price_LQTY_current, annualized_earning, MC_LQTY_current]\n\n\"\"\"# Simulation Program\"\"\"\n\n#Defining Initials\ninitials = {\"Price_LUSD\":[1.00], \"Price_Ether\":[price_ether_initial], \"n_open\":[initial_open], \"n_close\":[0], \"n_liquidate\": [0], \"n_redempt\":[0], \n \"n_troves\":[initial_open], \"stability\":[0], \"liquidity\":[0], \"redemption_pool\":[0],\n \"supply_LUSD\":[0], \"return_stability\":[initial_return], \"airdrop_gain\":[0], \"liquidation_gain\":[0], \"issuance_fee\":[0], \"redemption_fee\":[0],\n \"price_LQTY\":[price_LQTY_initial], \"MC_LQTY\":[0], \"annualized_earning\":[0]}\ndata = pd.DataFrame(initials)\ntroves= pd.DataFrame({\"Ether_Price\":[], \"Ether_Quantity\":[], \"CR_initial\":[], \n \"Supply\":[], \"Rational_inattention\":[], \"CR_current\":[]})\nresult_open = open_troves(troves, 0, data['Price_LUSD'][0])\ntroves = result_open[0]\nissuance_LUSD_open = result_open[2]\ndata.loc[0,'issuance_fee'] = issuance_LUSD_open * initials[\"Price_LUSD\"][0]\ndata.loc[0,'supply_LUSD'] = troves[\"Supply\"].sum()\ndata.loc[0,'liquidity'] = 0.5*troves[\"Supply\"].sum()\ndata.loc[0,'stability'] = 0.5*troves[\"Supply\"].sum()\n\n#Simulation Process\nfor index in range(1, n_sim):\n#exogenous ether price input\n price_ether_current = price_ether[index]\n troves['Ether_Price'] = price_ether_current\n price_LUSD_previous = data.loc[index-1,'Price_LUSD']\n price_LQTY_previous = data.loc[index-1,'price_LQTY']\n\n#trove liquidation & return of stability pool\n result_liquidation = liquidate_troves(troves, index, data)\n troves = 
result_liquidation[0]\n return_stability = result_liquidation[1]\n debt_liquidated = result_liquidation[2]\n ether_liquidated = result_liquidation[3]\n liquidation_gain = result_liquidation[4]\n airdrop_gain = result_liquidation[5]\n n_liquidate = result_liquidation[6]\n\n#close troves\n result_close = close_troves(troves, index, price_LUSD_previous)\n troves = result_close[0]\n n_close = result_close[1]\n #if n_close<0:\n # break\n\n#adjust troves\n result_adjustment = adjust_troves(troves, index)\n troves = result_adjustment[0]\n issuance_LUSD_adjust = result_adjustment[1]\n\n#open troves\n result_open = open_troves(troves, index, price_LUSD_previous)\n troves = result_open[0]\n n_open = result_open[1] \n issuance_LUSD_open = result_open[2]\n\n#Stability Pool\n stability_pool = stability_update(data.loc[index-1,'stability'], return_stability, index)[0]\n\n#Calculating Price, Liquidity Pool, and Redemption\n result_price = price_stabilizer(troves, index, data, stability_pool, n_open)\n price_LUSD_current = result_price[0]\n liquidity_pool = result_price[1]\n troves = result_price[2]\n issuance_LUSD_stabilizer = result_price[3]\n redemption_fee = result_price[4]\n n_redempt = result_price[5]\n redemption_pool = result_price[6]\n n_open=result_price[7]\n if liquidity_pool<0:\n break\n\n#LQTY Market\n result_LQTY = LQTY_market(index, data)\n price_LQTY_current = result_LQTY[0]\n annualized_earning = result_LQTY[1]\n MC_LQTY_current = result_LQTY[2]\n\n#Summary\n issuance_fee = price_LUSD_current * (issuance_LUSD_adjust + issuance_LUSD_open + issuance_LUSD_stabilizer)\n n_troves = troves.shape[0]\n supply_LUSD = troves['Supply'].sum()\n if index >= month:\n price_LQTY.append(price_LQTY_current)\n\n new_row = {\"Price_LUSD\":float(price_LUSD_current), \"Price_Ether\":float(price_ether_current), \"n_open\":float(n_open), \"n_close\":float(n_close), \n \"n_liquidate\":float(n_liquidate), \"n_redempt\": float(n_redempt), \"n_troves\":float(n_troves),\n \"stability\":float(stability_pool), \"liquidity\":float(liquidity_pool), \"redemption_pool\":float(redemption_pool), \"supply_LUSD\":float(supply_LUSD),\n \"issuance_fee\":float(issuance_fee), \"redemption_fee\":float(redemption_fee),\n \"airdrop_gain\":float(airdrop_gain), \"liquidation_gain\":float(liquidation_gain), \"return_stability\":float(return_stability), \n \"annualized_earning\":float(annualized_earning), \"MC_LQTY\":float(MC_LQTY_current), \"price_LQTY\":float(price_LQTY_current)\n }\n data = data.append(new_row, ignore_index=True)\n if price_LUSD_current < 0:\n break\n\n\"\"\"#**Exhibition**\"\"\"\n\ndata\n\ndef linevis(data, measure):\n fig = px.line(data, x=data.index/720, y=measure, title= measure+' dynamics')\n fig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['Price_LUSD'], name=\"LUSD Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['Price_Ether'], name=\"Ether Price\"),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Price Dynamics of LUSD and Ether\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"LUSD Price\", secondary_y=False)\nfig.update_yaxes(title_text=\"Ether Price\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_troves'], name=\"Number of Troves\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['supply_LUSD'], name=\"LUSD 
Supply\"),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Dynamics of Trove Numbers and LUSD Supply\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Number of Troves\", secondary_y=False)\nfig.update_yaxes(title_text=\"LUSD Supply\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(rows=2, cols=1)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_open'], name=\"Number of Troves Opened\", mode='markers'),\n row=1, col=1, secondary_y=False\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_close'], name=\"Number of Troves Closed\", mode='markers'),\n row=2, col=1, secondary_y=False\n)\nfig.update_layout(\n title_text=\"Dynamics of Number of Troves Opened and Closed\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Troves Opened\", row=1, col=1)\nfig.update_yaxes(title_text=\"Troves Closed\", row=2, col=1)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_liquidate'], name=\"Number of Liquidated Troves\", mode='markers'),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_redempt'], name=\"Number of Redempted Troves\", mode='markers'),\n secondary_y=False,\n)\nfig.update_layout(\n title_text=\"Dynamics of Number of Liquidated and Redempted Troves\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Number of Liquidated Troves\", secondary_y=False)\nfig.update_yaxes(title_text=\"Number of Redempted Troves\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['liquidity'], name=\"Liquidity Pool\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['stability'], name=\"Stability Pool\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=100*data['redemption_pool'], name=\"100*Redemption Pool\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['return_stability'], name=\"Return of Stability Pool\"),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Dynamics of Liquidity, Stability, Redemption Pools and Return of Stability Pool\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Size of Pools\", secondary_y=False)\nfig.update_yaxes(title_text=\"Return\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['airdrop_gain'], name=\"Airdrop Gain\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['liquidation_gain'], name=\"Liquidation Gain\"),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Dynamics of Airdrop and Liquidation Gain\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Airdrop Gain\", secondary_y=False)\nfig.update_yaxes(title_text=\"Liquidation Gain\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['issuance_fee'], name=\"Issuance Fee\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['redemption_fee'], name=\"Redemption Fee\"),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Dynamics of Issuance Fee and Redemption Fee\"\n)\nfig.update_xaxes(tick0=0, dtick=1, 
title_text=\"Month\")\nfig.update_yaxes(title_text=\"Issuance Fee\", secondary_y=False)\nfig.update_yaxes(title_text=\"Redemption Fee\", secondary_y=True)\nfig.show()\n\n#linevis(data, 'annualized_earning')\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['price_LQTY'], name=\"LQTY Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['MC_LQTY'], name=\"LQTY Market Cap\"),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Dynamics of the Price and Market Cap of LQTY\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"LQTY Price\", secondary_y=False)\nfig.update_yaxes(title_text=\"LQTY Market Cap\", secondary_y=True)\nfig.show()\n\ndef trove_histogram(measure):\n fig = px.histogram(troves, x=measure, title='Distribution of '+measure, nbins=25)\n fig.show()\n\ntroves\n\ntrove_histogram('Ether_Quantity')\ntrove_histogram('CR_initial')\ntrove_histogram('Supply')\ntrove_histogram('Rational_inattention')\ntrove_histogram('CR_current')\n\nimport matplotlib.pyplot as plt\nplt.plot(troves[\"Ether_Quantity\"])\nplt.show()\n\nplt.plot(troves[\"CR_initial\"])\nplt.show()\n\nplt.plot(troves[\"Supply\"])\nplt.show()\n\nplt.plot(troves[\"CR_current\"])\nplt.show()\n\ndata.describe()\n\n\"\"\"new policy function\n\nissuance fee = redemption fee = base rate\n\n#**Simulation with Policy Function**\n\"\"\"\n\n#Defining Initials\ninitials = {\"Price_LUSD\":[1.00], \"Price_Ether\":[price_ether_initial], \"n_open\":[initial_open], \"n_close\":[0], \"n_liquidate\": [0], \"n_redempt\":[0], \n \"n_troves\":[initial_open], \"stability\":[0], \"liquidity\":[0], \"redemption_pool\":[0],\n \"supply_LUSD\":[0], \"return_stability\":[initial_return], \"airdrop_gain\":[0], \"liquidation_gain\":[0], \"issuance_fee\":[0], \"redemption_fee\":[0],\n \"price_LQTY\":[price_LQTY_initial], \"MC_LQTY\":[0], \"annualized_earning\":[0], \"base_rate\":[base_rate_initial]}\ndata2 = pd.DataFrame(initials)\ntroves2= pd.DataFrame({\"Ether_Price\":[], \"Ether_Quantity\":[], \"CR_initial\":[], \n \"Supply\":[], \"Rational_inattention\":[], \"CR_current\":[]})\nresult_open = open_troves(troves2, 0, data2['Price_LUSD'][0])\ntroves2 = result_open[0]\nissuance_LUSD_open = result_open[2]\ndata2.loc[0,'issuance_fee'] = issuance_LUSD_open * initials[\"Price_LUSD\"][0]\ndata2.loc[0,'supply_LUSD'] = troves2[\"Supply\"].sum()\ndata2.loc[0,'liquidity'] = 0.5*troves2[\"Supply\"].sum()\ndata2.loc[0,'stability'] = 0.5*troves2[\"Supply\"].sum()\n\n#Simulation Process\nfor index in range(1, n_sim):\n#exogenous ether price input\n price_ether_current = price_ether[index]\n troves2['Ether_Price'] = price_ether_current\n price_LUSD_previous = data2.loc[index-1,'Price_LUSD']\n price_LQTY_previous = data2.loc[index-1,'price_LQTY']\n\n#policy function determines base rate\n base_rate_current = 0.98 * data2.loc[index-1,'base_rate'] + 0.5*(data2.loc[index-1,'redemption_pool']/troves2['Supply'].sum())\n rate_issuance = base_rate_current\n rate_redemption = base_rate_current\n\n#trove liquidation & return of stability pool\n result_liquidation = liquidate_troves(troves2, index, data2)\n troves2 = result_liquidation[0]\n return_stability = result_liquidation[1]\n debt_liquidated = result_liquidation[2]\n ether_liquidated = result_liquidation[3]\n liquidation_gain = result_liquidation[4]\n airdrop_gain = result_liquidation[5]\n n_liquidate = result_liquidation[6]\n\n#close troves\n result_close = close_troves(troves2, 
index, price_LUSD_previous)\n troves2 = result_close[0]\n n_close = result_close[1]\n #if n_close<0:\n # break\n\n#adjust troves\n result_adjustment = adjust_troves(troves2, index)\n troves2 = result_adjustment[0]\n issuance_LUSD_adjust = result_adjustment[1]\n\n#open troves\n result_open = open_troves(troves2, index, price_LUSD_previous)\n troves2 = result_open[0]\n n_open = result_open[1] \n issuance_LUSD_open = result_open[2]\n\n#Stability Pool\n stability_pool = stability_update(data2.loc[index-1,'stability'], return_stability, index)[0]\n\n#Calculating Price, Liquidity Pool, and Redemption\n result_price = price_stabilizer(troves2, index, data2, stability_pool, n_open)\n price_LUSD_current = result_price[0]\n liquidity_pool = result_price[1]\n troves2 = result_price[2]\n issuance_LUSD_stabilizer = result_price[3]\n redemption_fee = result_price[4]\n n_redempt = result_price[5]\n redemption_pool = result_price[6]\n n_open=result_price[7]\n if liquidity_pool<0:\n break\n\n#LQTY Market\n result_LQTY = LQTY_market(index, data2)\n price_LQTY_current = result_LQTY[0]\n annualized_earning = result_LQTY[1]\n MC_LQTY_current = result_LQTY[2]\n\n#Summary\n issuance_fee = price_LUSD_current * (issuance_LUSD_adjust + issuance_LUSD_open + issuance_LUSD_stabilizer)\n n_troves = troves2.shape[0]\n supply_LUSD = troves2['Supply'].sum()\n if index >= month:\n price_LQTY.append(price_LQTY_current)\n\n new_row = {\"Price_LUSD\":float(price_LUSD_current), \"Price_Ether\":float(price_ether_current), \"n_open\":float(n_open), \"n_close\":float(n_close), \n \"n_liquidate\":float(n_liquidate), \"n_redempt\": float(n_redempt), \"n_troves\":float(n_troves),\n \"stability\":float(stability_pool), \"liquidity\":float(liquidity_pool), \"redemption_pool\":float(redemption_pool), \"supply_LUSD\":float(supply_LUSD),\n \"issuance_fee\":float(issuance_fee), \"redemption_fee\":float(redemption_fee),\n \"airdrop_gain\":float(airdrop_gain), \"liquidation_gain\":float(liquidation_gain), \"return_stability\":float(return_stability), \n \"annualized_earning\":float(annualized_earning), \"MC_LQTY\":float(MC_LQTY_current), \"price_LQTY\":float(price_LQTY_current), \n \"base_rate\":float(base_rate_current)}\n data2 = data2.append(new_row, ignore_index=True)\n if price_LUSD_current < 0:\n break\n\ndata2\n\n\"\"\"#**Exhibition Part 2**\"\"\"\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['Price_LUSD'], name=\"LUSD Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['Price_Ether'], name=\"Ether Price\"),\n secondary_y=True,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['Price_LUSD'], name=\"LUSD Price New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.update_layout(\n title_text=\"Price Dynamics of LUSD and Ether\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"LUSD Price\", secondary_y=False)\nfig.update_yaxes(title_text=\"Ether Price\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_troves'], name=\"Number of Troves\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['supply_LUSD'], name=\"LUSD Supply\"),\n secondary_y=True,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['n_troves'], name=\"Number of Troves New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, 
y=data2['supply_LUSD'], name=\"LUSD Supply New\", line = dict(dash='dot')),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Dynamics of Trove Numbers and LUSD Supply\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Number of Troves\", secondary_y=False)\nfig.update_yaxes(title_text=\"LUSD Supply\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(rows=2, cols=2)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_open'], name=\"Number of Troves Opened\", mode='markers'),\n row=1, col=1, secondary_y=False\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_close'], name=\"Number of Troves Closed\", mode='markers'),\n row=2, col=1, secondary_y=False\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['n_open'], name=\"Number of Troves Opened New\", mode='markers'),\n row=1, col=2, secondary_y=False\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['n_close'], name=\"Number of Troves Closed New\", mode='markers'),\n row=2, col=2, secondary_y=False\n)\nfig.update_layout(\n title_text=\"Dynamics of Number of Troves Opened and Closed\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Troves Opened\", row=1, col=1)\nfig.update_yaxes(title_text=\"Troves Closed\", row=2, col=1)\nfig.show()\n\nfig = make_subplots(rows=2, cols=1)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_liquidate'], name=\"Number of Liquidated Troves\"),\n row=1, col=1, secondary_y=False\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['n_redempt'], name=\"Number of Redempted Troves\"),\n row=2, col=1, secondary_y=False\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['n_liquidate'], name=\"Number of Liquidated Troves New\", line = dict(dash='dot')),\n row=1, col=1, secondary_y=False\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['n_redempt'], name=\"Number of Redempted Troves New\", line = dict(dash='dot')),\n row=2, col=1, secondary_y=False\n)\nfig.update_layout(\n title_text=\"Dynamics of Number of Liquidated and Redempted Troves\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Troves Liquidated\", row=1, col=1)\nfig.update_yaxes(title_text=\"Troves Redempted\", row=2, col=1)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['liquidity'], name=\"Liquidity Pool\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['stability'], name=\"Stability Pool\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=100*data['redemption_pool'], name=\"100*Redemption Pool\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['liquidity'], name=\"Liquidity Pool New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['stability'], name=\"Stability Pool New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=100*data2['redemption_pool'], name=\"100*Redemption Pool New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.update_layout(\n title_text=\"Dynamics of Liquidity, Stability, Redemption Pools and Return of Stability Pool\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Size of Pools\", secondary_y=False)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n 
go.Scatter(x=data.index/720, y=data['return_stability'], name=\"Return of Stability Pool\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['return_stability'], name=\"Return of Stability Pool New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.update_layout(\n title_text=\"Dynamics of Liquidity, Stability, Redemption Pools and Return of Stability Pool\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Return\", secondary_y=False)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['airdrop_gain'], name=\"Airdrop Gain\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['liquidation_gain'], name=\"Liquidation Gain\"),\n secondary_y=True,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['airdrop_gain'], name=\"Airdrop Gain New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['liquidation_gain'], name=\"Liquidation Gain New\", line = dict(dash='dot')),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Dynamics of Airdrop and Liquidation Gain\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Airdrop Gain\", secondary_y=False)\nfig.update_yaxes(title_text=\"Liquidation Gain\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(rows=2, cols=1)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['issuance_fee'], name=\"Issuance Fee\"),\n row=1, col=1\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['redemption_fee'], name=\"Redemption Fee\"),\n row=2, col=1\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['issuance_fee'], name=\"Issuance Fee New\", line = dict(dash='dot')),\n row=1, col=1\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['redemption_fee'], name=\"Redemption Fee New\", line = dict(dash='dot')),\n row=2, col=1\n)\nfig.update_layout(\n title_text=\"Dynamics of Issuance Fee and Redemption Fee\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Issuance Fee\", secondary_y=False, row=1, col=1)\nfig.update_yaxes(title_text=\"Redemption Fee\", secondary_y=False, row=2, col=1)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['annualized_earning'], name=\"Annualized Earning\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['annualized_earning'], name=\"Annualized Earning New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.update_layout(\n title_text=\"Dynamics of Annualized Earning\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Annualized Earning\", secondary_y=False)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['price_LQTY'], name=\"LQTY Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data.index/720, y=data['MC_LQTY'], name=\"LQTY Market Cap\"),\n secondary_y=True,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['price_LQTY'], name=\"LQTY Price New\", line = dict(dash='dot')),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['MC_LQTY'], name=\"LQTY Market Cap New\", line = dict(dash='dot')),\n secondary_y=True,\n)\nfig.update_layout(\n title_text=\"Dynamics of the Price and Market Cap of 
LQTY\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"LQTY Price\", secondary_y=False)\nfig.update_yaxes(title_text=\"LQTY Market Cap\", secondary_y=True)\nfig.show()\n\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\nfig.add_trace(\n go.Scatter(x=data.index/720, y=[0.01] * n_sim, name=\"Base Rate\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=data2.index/720, y=data2['base_rate'], name=\"Base Rate New\"),\n secondary_y=False,\n)\nfig.update_layout(\n title_text=\"Dynamics of Issuance Fee and Redemption Fee\"\n)\nfig.update_xaxes(tick0=0, dtick=1, title_text=\"Month\")\nfig.update_yaxes(title_text=\"Issuance Fee\", secondary_y=False)\nfig.update_yaxes(title_text=\"Redemption Fee\", secondary_y=True)\nfig.show()\n\ndef trove2_histogram(measure):\n fig = px.histogram(troves2, x=measure, title='Distribution of '+measure, nbins=25)\n fig.show()\n\ntrove2_histogram('Ether_Quantity')\ntrove2_histogram('CR_initial')\ntrove2_histogram('Supply')\ntrove2_histogram('Rational_inattention')\ntrove2_histogram('CR_current')" ]
[ [ "numpy.random.normal", "numpy.random.seed", "pandas.DataFrame", "numpy.random.gamma", "matplotlib.pyplot.plot", "numpy.random.chisquare", "numpy.random.uniform", "matplotlib.pyplot.show" ] ]
onkarsabnis/realtime-facial-emotion-analyzer
[ "ac74f94e8caa211b615413819ca5a7bcaf0aa554", "ac74f94e8caa211b615413819ca5a7bcaf0aa554" ]
[ "tests/test_face_detection_opencv.py", "training/data_prep.py" ]
[ "# ---- coding: utf-8 ----\n# ===================================================\n# Author: Susanta Biswas\n# ===================================================\n\"\"\"Description: Tests for opencv face detector.\"\"\"\n# ===================================================\n\nfrom emotion_analyzer.exceptions import ModelFileMissing, InvalidImage\nfrom emotion_analyzer.face_detection_opencv import FaceDetectorOpenCV\nimport pytest \nimport numpy as np\n\ndef test_invalid_image():\n model_loc = \"./models\"\n ob = FaceDetectorOpenCV(model_loc=model_loc)\n img = np.zeros((100,100,5), dtype='float32')\n\n with pytest.raises(InvalidImage):\n ob.detect_faces(img)\n\n\ndef test_bbox_outside_img():\n model_loc = \"./models\"\n ob = FaceDetectorOpenCV(model_loc=model_loc)\n \n assert ob.is_valid_bbox([0, 0, 100, 100], 10, 10) == False\n\n\ndef test_correct_model_path():\n \"\"\"\n Test object init with the correct model path\n \"\"\"\n ob = None\n model_loc = \"./models\"\n try:\n ob = FaceDetectorOpenCV(model_loc=model_loc)\n except Exception:\n pass\n finally:\n assert isinstance(ob, FaceDetectorOpenCV)\n\n\ndef test_incorrect_model_path():\n \"\"\"\n Test object init with the incorrect model path\n \"\"\"\n inccorrect_model_loc = \"./wrong_models\"\n with pytest.raises(ModelFileMissing):\n _ = FaceDetectorOpenCV(model_loc=inccorrect_model_loc)\n\n\ndef test_detect_face(img2_data, img2_facebox_opencv):\n model_loc = \"./models\"\n ob = FaceDetectorOpenCV(model_loc=model_loc)\n assert img2_facebox_opencv == ob.detect_faces(img2_data)\n", "import cv2\nimport numpy as np\n\ndef load_images(start_idx, end_idx, base_path):\n # training images\n images = []\n for name in range(start_idx, end_idx):\n img = cv2.imread(base_path + str(name) + '.jpg', 0)\n if img is not None:\n images.append(img)\n\n return images\n\n# read images from folder\ndef load_images_folder():\n # training images\n images_train = load_images(0, 28710, 'output/Training/')\n # validation images\n images_cv = load_images(28710, 32299, 'output/PublicTest/')\n # test images\n images_test = load_images(32299, 35888, 'output/PrivateTest/')\n \n return images_train, images_cv, images_test\n \n\n# load the images\nimages_train, images_cv, images_test = load_images_folder()\n\n# change to numpy matrix\nimages_train = np.array(images_train)\nimages_cv = np.array(images_cv)\nimages_test = np.array(images_test)\n\n# save the numpy matrix\nnp.save('dataset/train_raw.npy', images_train)\nnp.save('dataset/cv_raw.npy', images_cv)\nnp.save('dataset/test_raw.npy', images_test)\n" ]
[ [ "numpy.zeros" ], [ "numpy.array", "numpy.save" ] ]
alexis-roche/nireg
[ "6ed32f2830ff6ebc1860519dc630ebdf8e969dcf" ]
[ "nireg/slicetiming/timefuncs.py" ]
[ "\"\"\" Utility functions for returning slice times from number of slices and TR\n\nSlice timing routines in nipy need a vector of slice times.\n\nSlice times are vectors $t_i i = 0 ... N$ of times, one for each slice, where\n$t_i% gives the time at which slice number $i$ was acquired, relative to the\nbeginning of the volume acquisition.\n\nWe like these vectors because they are unambiguous; the indices $i$ refer to\npositions in space, and the values $t_i$ refer to times.\n\nBut, there are many common slice timing regimes for which it's easy to get the\nslice times once you know the volume acquisition time (the TR) and the number of\nslices.\n\nFor example, if you acquired the slices in a simple ascending order, and you\nhave 10 slices and the TR was 2.0, then the slice times are:\n\n>>> import numpy as np\n>>> np.arange(10) / 10. * 2.0\narray([ 0. , 0.2, 0.4, 0.6, 0.8, 1. , 1.2, 1.4, 1.6, 1.8])\n\nThese are small convenience functions that accept the number of slices and the\nTR as input, and return a vector of slice times:\n\n>>> ascending(10, 2.)\narray([ 0. , 0.2, 0.4, 0.6, 0.8, 1. , 1.2, 1.4, 1.6, 1.8])\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\n# Dictionary (key, value) == (name, function) for slice timing functions\nSLICETIME_FUNCTIONS = {}\n\ndef _dec_filldoc(func):\n \"\"\" Fill docstring of slice time function\n \"\"\"\n func._doc_template = func.__doc__\n func.__doc__ = func.__doc__.format(\n **dict(\n name = func.__name__,\n pstr=\n\"\"\"Note: slice 0 is the first slice in the voxel data block\n\n Parameters\n ----------\n n_slices : int\n Number of slices in volume\n TR : float\n Time to acquire one full volume\n\n Returns\n -------\n slice_times : (n_slices,) ndarray\n Vectors $t_i i = 0 ... N$ of times, one for each slice, where $t_i$\n gives the time at which slice number $i$ was acquired, relative to the\n beginning of the volume acquisition.\n \"\"\"))\n return func\n\n\ndef _dec_register_stf(func):\n \"\"\" Register slice time function in module dictionary \"\"\"\n name = func.__name__\n SLICETIME_FUNCTIONS[name] = func\n if name.startswith('st_'):\n short_name = name[3:]\n if short_name in SLICETIME_FUNCTIONS:\n raise ValueError(\n \"Duplicate short / long function name {0}\".format(short_name))\n SLICETIME_FUNCTIONS[short_name] = func\n return func\n\n\ndef _dec_stfunc(func):\n return _dec_register_stf(_dec_filldoc(func))\n\n\ndef _derived_func(name, func):\n def derived(n_slices, TR):\n return func(n_slices, TR)\n derived.__name__ = name\n derived.__doc__ = func._doc_template\n return _dec_stfunc(derived)\n\n\n@_dec_stfunc\ndef st_01234(n_slices, TR):\n \"\"\" Simple ascending slice sequence\n\n slice 0 first, slice 1 second etc.\n\n For example, for 5 slices and a TR of 1:\n\n >>> {name}(5, 1.)\n array([ 0. , 0.2, 0.4, 0.6, 0.8])\n\n {pstr}\n \"\"\"\n return np.arange(n_slices) / n_slices * TR\n\nascending = _derived_func('ascending', st_01234)\n\n\n@_dec_stfunc\ndef st_43210(n_slices, TR):\n \"\"\" Simple descending slice sequence\n\n slice ``n_slices-1`` first, slice ``n_slices - 2`` second etc.\n\n For example, for 5 slices and a TR of 1:\n\n >>> {name}(5, 1.)\n array([ 0.8, 0.6, 0.4, 0.2, 0. ])\n\n {pstr}\n \"\"\"\n return np.arange(n_slices)[::-1] / n_slices * TR\n\ndescending = _derived_func('descending', st_43210)\n\n\n@_dec_stfunc\ndef st_02413(n_slices, TR):\n \"\"\"Ascend alternate every second slice, starting at first slice\n\n Collect slice 0 first, slice 2 second up to top. 
Then return to collect\n slice 1, slice 3 etc.\n\n For example, for 5 slices and a TR of 1:\n\n >>> {name}(5, 1.)\n array([ 0. , 0.6, 0.2, 0.8, 0.4])\n\n {pstr}\n \"\"\"\n one_slice = TR / n_slices\n time_to_space = list(range(0, n_slices, 2)) + list(range(1, n_slices, 2))\n space_to_time = np.argsort(time_to_space)\n return space_to_time * one_slice\n\nasc_alt_2 = _derived_func('asc_alt_2', st_02413)\n\n\n@_dec_stfunc\ndef st_13024(n_slices, TR):\n \"\"\"Ascend alternate every second slice, starting at second slice\n\n Collect slice 1 first, slice 3 second up to top (highest numbered slice).\n Then return to collect slice 0, slice 2 etc. This order is rare except on\n Siemens acquisitions with an even number of slices. See\n :func:`st_odd0_even1` for this logic.\n\n For example, for 5 slices and a TR of 1:\n\n >>> {name}(5, 1.)\n array([ 0.4, 0. , 0.6, 0.2, 0.8])\n\n {pstr}\n \"\"\"\n one_slice = TR / n_slices\n time_to_space = list(range(1, n_slices, 2)) + list(range(0, n_slices, 2))\n space_to_time = np.argsort(time_to_space)\n return space_to_time * one_slice\n\nasc_alt_2_1 = _derived_func('asc_alt_2_1', st_13024)\n\n\n@_dec_stfunc\ndef st_42031(n_slices, TR):\n \"\"\"Descend alternate every second slice, starting at last slice\n\n Collect slice (`n_slices` - 1) first, slice (`nslices` - 3) second down to\n bottom (lowest numbered slice). Then return to collect slice (`n_slices`\n -2), slice (`n_slices` - 4) etc.\n\n For example, for 5 slices and a TR of 1:\n\n >>> {name}(5, 1.)\n array([ 0.4, 0.8, 0.2, 0.6, 0. ])\n\n {pstr}\n \"\"\"\n return st_02413(n_slices, TR)[::-1]\n\ndesc_alt_2 = _derived_func('desc_alt_2', st_42031)\n\n\n@_dec_stfunc\ndef st_odd0_even1(n_slices, TR):\n \"\"\"Ascend alternate starting at slice 0 for odd, slice 1 for even `n_slices`\n\n Acquisitions with alternating ascending slices from Siemens scanners often\n seem to have this behavior as default - see:\n https://mri.radiology.uiowa.edu/fmri_images.html\n\n This means we use the :func:`st_02413` algorithm if `n_slices` is odd,\n and the :func:`st_13024` algorithm if `n_slices is even.\n\n For example, for 4 slices and a TR of 1:\n\n >>> {name}(4, 1.)\n array([ 0.5 , 0. , 0.75, 0.25])\n\n 5 slices and a TR of 1:\n\n >>> {name}(5, 1.)\n array([ 0. , 0.6, 0.2, 0.8, 0.4])\n\n {pstr}\n \"\"\"\n if n_slices % 2 == 0:\n return st_13024(n_slices, TR)\n return st_02413(n_slices, TR)\n\nasc_alt_siemens = _derived_func('asc_alt_siemens', st_odd0_even1)\n\n\n@_dec_stfunc\ndef st_03142(n_slices, TR):\n \"\"\"Ascend alternate, where alternation is by half the volume\n\n Collect slice 0 then slice ``ceil(n_slices / 2.)`` then slice 1 then slice\n ``ceil(nslices / 2.) + 1`` etc.\n\n For example, for 5 slices and a TR of 1:\n\n >>> {name}(5, 1.)\n array([ 0. , 0.4, 0.8, 0.2, 0.6])\n\n {pstr}\n \"\"\"\n one_slice = TR / n_slices\n space_to_time = (list(range(0, n_slices, 2)) +\n list(range(1, n_slices, 2)))\n return np.array(space_to_time) * one_slice\n\nasc_alt_half = _derived_func('asc_alt_half', st_03142)\n\n\n@_dec_stfunc\ndef st_41302(n_slices, TR):\n \"\"\"Descend alternate, where alternation is by half the volume\n\n Collect slice (n_slices - 1) then slice ``floor(nslices / 2.) - 1`` then slice\n (n_slices - 2) then slice ``floor(nslices / 2.) - 2`` etc.\n\n For example, for 5 slices and a TR of 1:\n\n >>> {name}(5, 1.)\n array([ 0.6, 0.2, 0.8, 0.4, 0. ])\n\n {pstr}\n \"\"\"\n return st_03142(n_slices, TR)[::-1]\n\ndesc_alt_half = _derived_func('desc_alt_half', st_41302)\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.argsort" ] ]
Exhor/dfstore
[ "c314adbee048c24fdcb17ed975f4ed462061fc21" ]
[ "tests/test_api.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom fastapi.testclient import TestClient\n\nfrom src.client import DFStoreClient\nfrom src.server import make_app\nfrom tests.e2e import e2e\n\n\ndef make_awkward_data(nrows=100, ncols=5):\n df = pd.DataFrame(\n {\n \"const\": [1] * nrows,\n \"index\": range(nrows),\n \"float_with_nans\": [np.nan] + [1] * (nrows - 2) + [np.nan],\n \"text\": (list(\"abbccc\") * nrows)[:nrows],\n \"text_with_nans\": ([\"a\", np.nan] * nrows)[:nrows],\n }\n ).set_index(\"index\")\n for c in range(df.shape[1], ncols):\n df[f\"rnd_{c}\"] = np.random.randn(nrows)\n return df\n\n\ndef test_upload_and_download_awkward_data_returns_original_data_with_text_and_nans():\n app = make_app()\n client = DFStoreClient(httpclient=TestClient(app), base_url=\"\")\n df = make_awkward_data(nrows=5)\n filename = \"upload.csv\"\n\n client.upload_dataframe(df=df, name=filename)\n reloaded_df = client.get(name=filename)\n\n expected = df.reset_index()\n pd.testing.assert_frame_equal(reloaded_df, expected)\n\n\ndef test_e2e():\n app = make_app()\n client = TestClient(app)\n url = \"\"\n e2e(httpclient=client, url=url)\n" ]
[ [ "pandas.testing.assert_frame_equal", "numpy.random.randn" ] ]
aircov/-
[ "2e306b846dcdd3c57f1dba4493dc3d2babb036a7" ]
[ "data_nlp/kears_ner.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@time : 2020/07/18 10:45\n@author : 姚明伟\n\"\"\"\nimport pickle\nimport numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras import Sequential\nfrom keras_contrib.layers import CRF\nimport pickle\nfrom keras.layers import Embedding, Bidirectional, LSTM\n\n\nclass Data_set:\n # 定义Dataset类,封装一些数据读入和预处理方法。\n def __init__(self, data_path, labels):\n with open(data_path, \"rb\") as f:\n self.data = f.read().decode(\"utf-8\")\n self.process_data = self.process_data()\n self.labels = labels\n\n def process_data(self):\n train_data = self.data.split(\"\\n\\n\")\n train_data = [token.split(\"\\n\") for token in train_data]\n train_data = [[j.split() for j in i] for i in train_data]\n train_data.pop()\n return train_data\n\n def save_vocab(self, save_path):\n all_char = [char[0] for sen in self.process_data for char in sen]\n chars = set(all_char)\n word2id = {char: id_ + 1 for id_, char in enumerate(chars)}\n word2id[\"unk\"] = 0\n with open(save_path, \"wb\") as f:\n pickle.dump(word2id, f)\n return word2id\n\n def generate_data(self, vocab, maxlen):\n char_data_sen = [[token[0] for token in i] for i in self.process_data]\n label_sen = [[token[1] for token in i] for i in self.process_data]\n sen2id = [[vocab.get(char, 0) for char in sen] for sen in char_data_sen]\n label2id = {label: id_ for id_, label in enumerate(self.labels)}\n lab_sen2id = [[label2id.get(lab, 0) for lab in sen] for sen in label_sen]\n sen_pad = pad_sequences(sen2id, maxlen)\n lab_pad = pad_sequences(lab_sen2id, maxlen, value=-1)\n lab_pad = np.expand_dims(lab_pad, 2)\n return sen_pad, lab_pad" ]
[ [ "numpy.expand_dims" ] ]
ManiBlitz/URA-text-analysis
[ "839eba038fa36782200905a43d710f51d1761b7a" ]
[ "parser/kmeansClusters.py" ]
[ "from copy import deepcopy\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.rcParams['figure.figsize'] = (16, 9)\nplt.style.use('ggplot')\n\n# Euclidean Distance Caculator\ndef dist(a, b, ax=1):\n return np.linalg.norm(a - b, axis=ax)\n\n# Define the number of clusters and provide an array with the different centers\ndef get_clusters(X,n=3):\n # Number of clusters\n print(X)\n k = n\n # X coordinates of random centroids\n C_x = np.random.randint(0, 30, size=k)\n # Y coordinates of random centroids\n C_y = np.random.randint(0, 30, size=k)\n C = np.array(list(zip(C_x, C_y)), dtype=np.float32)\n C_final = update_clusters(k,X,C,error_range=0.02)\n return C_final\n\ndef update_clusters(k, X, C, error_range = 0.01):\n\n # To store the value of centroids when it updates\n C_old = np.zeros(C.shape)\n # Cluster Lables(0, 1, 2)\n clusters = np.zeros(len(X))\n # Error func. - Distance between new centroids and old centroids\n error = dist(C, C_old, None)\n cluster_iter = 1\n # Loop will run till the error becomes zero\n while error > error_range:\n # Assigning each value to its closest cluster\n for i in range(len(X)):\n distances = dist(X[i], C)\n cluster = np.argmin(distances)\n clusters[i] = cluster\n # Storing the old centroid values\n C_old = deepcopy(C)\n # Finding the new centroids by taking the average value\n for i in range(k):\n points = [X[j] for j in range(len(X)) if clusters[j] == i]\n C[i] = np.mean(points, axis=0)\n error = dist(C, C_old, None)\n print(\"======> Cluster Iteration \"+str(cluster_iter)+\"\\n---------------------------\")\n print(C)\n print(\"error = \"+str(error))\n cluster_iter += 1\n return {'C':C,'clusters':clusters}\n\n" ]
[ [ "numpy.linalg.norm", "numpy.zeros", "numpy.argmin", "numpy.mean", "numpy.random.randint", "matplotlib.pyplot.style.use" ] ]
hvanwyk/drifter
[ "a08df0cef81bc6ca76084ae8cac089644e2bd56b", "a08df0cef81bc6ca76084ae8cac089644e2bd56b" ]
[ "tests/test_mesh/test_cell.py", "tests/test_gmrf/test_spdmatrix.py" ]
[ "from mesh import Cell, HalfEdge, Vertex\nfrom mesh import convert_to_array\nfrom assembler import GaussRule\nimport numpy as np\nimport unittest\n\n\nclass TestCell(unittest.TestCase):\n \"\"\"\n Test Cell object(s).\n \"\"\"\n def test_constructor(self):\n \"\"\"\n Constructor\n \"\"\"\n #\n # Triangle\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((0,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h31 = HalfEdge(v3, v1)\n \n # Vertices not in order\n bad_list_1 = [h12, h31, h23]\n self.assertRaises(Exception, Cell, *[bad_list_1])\n \n # Not a closed loop\n bad_list_2 = [h12, h23]\n self.assertRaises(Exception, Cell, *[bad_list_2])\n \n triangle_half_edges = [h12, h23, h31]\n cell = Cell(triangle_half_edges)\n self.assertAlmostEqual(cell.area(),0.5)\n self.assertEqual(cell.n_vertices(),3)\n self.assertEqual(cell.n_half_edges(),3)\n half_edge = cell.get_half_edge(0)\n for i in range(3):\n self.assertEqual(half_edge.next(), triangle_half_edges[(i+1)%3])\n half_edge = half_edge.next()\n \n #\n # Square \n # \n v4 = Vertex((1,1))\n h24 = HalfEdge(v2,v4)\n h43 = HalfEdge(v4,v3)\n square_half_edges = [h12, h24, h43, h31]\n cell = Cell(square_half_edges)\n self.assertAlmostEqual(cell.area(),1)\n self.assertEqual(cell.n_vertices(),4)\n self.assertEqual(cell.n_half_edges(),4)\n \n \n def test_get_half_edge(self):\n #\n # Construct Cell\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((0,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h31 = HalfEdge(v3, v1)\n \n # Check whether you get the right he's back\n hes = [h12, h23, h31]\n cell = Cell(hes)\n for i in range(3):\n self.assertEqual(cell.get_half_edge(i), hes[i])\n \n \n def test_get_half_edges(self):\n #\n # Construct Cell\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((0,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h31 = HalfEdge(v3, v1)\n \n # Check whether you get the right he's back\n hes = [h12, h23, h31]\n cell = Cell(hes)\n self.assertEqual(cell.get_half_edges(), hes)\n \n \n \n def test_get_vertex(self):\n #\n # Construct Cell\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((0,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h31 = HalfEdge(v3, v1)\n \n # Check whether you get the right he's back\n vs = [v1, v2, v3]\n cell = Cell([h12,h23,h31])\n for i in range(3):\n self.assertEqual(cell.get_vertex(i), vs[i])\n \n \n def test_get_vertices(self):\n #\n # Construct Cell\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((0,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h31 = HalfEdge(v3, v1)\n \n # Check whether you get the right he's back\n vs = [v1, v2, v3]\n cell = Cell([h12,h23,h31])\n self.assertEqual(cell.get_vertices(), vs)\n \n \n def test_contains_points(self):\n #\n # Triangle\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((0,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h31 = HalfEdge(v3, v1)\n \n cell = Cell([h12, h23, h31])\n \n # Vertices\n in_cell = cell.contains_points([v1,v2,v3])\n in_cell_ref = np.ones(3, dtype=np.bool)\n for i in range(3):\n self.assertEqual(in_cell_ref[i], in_cell[i])\n \n # Random points\n points = np.random.rand(100,2)\n in_cell_ref = (points[:,1]<1-points[:,0])\n in_cell = cell.contains_points(points)\n for i in range(100):\n self.assertEqual(in_cell_ref[i], in_cell[i])\n \n #\n # Square \n # \n v4 = Vertex((1,1))\n h24 = HalfEdge(v2,v4)\n h43 = HalfEdge(v4,v3)\n square_half_edges = [h12, h24, h43, h31]\n cell = Cell(square_half_edges)\n \n points = 
[(2,0), (-1,0), (0.5,0.5)]\n in_cell_ref = np.array([0,0,1], dtype=np.bool)\n in_cell = cell.contains_points(points)\n for i in range(3):\n self.assertEqual(in_cell_ref[i], in_cell[i])\n \n #\n # Single points\n # \n # Vertex \n point = Vertex((3,3))\n self.assertFalse(cell.contains_points(point))\n # Tuple\n point = (1,0)\n self.assertTrue(cell.contains_points(point)) \n # Array\n point = np.array([1,0])\n self.assertTrue(cell.contains_points(point))\n \n \n def test_intersects_line_segment(self):\n vertices = [Vertex((0,0)), Vertex((3,1)), \n Vertex((2,3)), Vertex((-1,1))]\n \n h_edges = []\n for i in range(4):\n h_edges.append(HalfEdge(vertices[i], vertices[(i+1)%4]))\n cell = Cell(h_edges)\n \n #\n # Line beginning in cell and ending outside\n # \n line_1 = [(1,1),(3,0)]\n self.assertTrue(cell.intersects_line_segment(line_1),\\\n 'Cell should intersect line segment.')\n #\n # Line inside cell\n #\n line_2 = [(1,1),(1.1,1.1)]\n self.assertTrue(cell.intersects_line_segment(line_2),\\\n 'Cell contains line segment.')\n #\n # Line outside cell\n # \n line_3 = [(3,0),(5,6)]\n self.assertFalse(cell.intersects_line_segment(line_3),\\\n 'Cell does not intersect line segment.')\n \n def test_incident_half_edge(self):\n #\n # Triangle\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((0,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h31 = HalfEdge(v3, v1)\n \n cell = Cell([h12, h23, h31])\n \n hes_forward = [h31, h12, h23]\n hes_reverse = [h12, h23, h31]\n vs = [v1,v2,v3]\n for i in range(3):\n # forward\n self.assertEqual(cell.incident_half_edge(vs[i]),hes_forward[i])\n \n # backward\n self.assertEqual(cell.incident_half_edge(vs[i], reverse=True),\\\n hes_reverse[i])\n \n \n def test_get_neighbors(self):\n #\n # HalfEdge pivot\n # \n \n #\n # Cell with no neighbors\n # \n v1 = Vertex((0,0))\n v2 = Vertex((1,0))\n v3 = Vertex((0,1))\n \n h12 = HalfEdge(v1, v2)\n h23 = HalfEdge(v2, v3)\n h31 = HalfEdge(v3, v1)\n #\n # Make triangle\n # \n cell = Cell([h12, h23, h31])\n # No neighbors\n self.assertIsNone(cell.get_neighbors(h12))\n self.assertEqual(cell.get_neighbors(v1),[])\n \n # Add a new neighboring triangle\n v4 = Vertex((1,1))\n h24 = HalfEdge(v2, v4)\n h43 = HalfEdge(v4 ,v3)\n h32 = h23.make_twin()\n \n ncell_1 = Cell([h24, h43, h32])\n \n # Make sure they are neighbors wrt halfedge\n self.assertEqual(cell.get_neighbors(h23),ncell_1)\n \n # Neighbors wrt vertices\n self.assertEqual(cell.get_neighbors(v2),[ncell_1])\n self.assertEqual(cell.get_neighbors(v3),[ncell_1])\n \n #\n # Add a third neighboring triangle\n #\n v5 = Vertex((1,2))\n h34 = h43.make_twin()\n h45 = HalfEdge(v4, v5)\n h53 = HalfEdge(v5, v3)\n \n ncell_2 = Cell([h34, h45, h53])\n \n # Check if it's a neighbor wrt halfedge\n self.assertEqual(ncell_1.get_neighbors(h43), ncell_2)\n \n # 2 Neighbors wrt v3 \n self.assertEqual(cell.get_neighbors(v3),[ncell_1, ncell_2])\n self.assertEqual(ncell_1.get_neighbors(v3), [ncell_2, cell])\n self.assertEqual(ncell_2.get_neighbors(v3), [cell, ncell_1])\n \n #\n # Split h31 and make an illegal neighbor\n #\n v6 = Vertex((-1,0.5))\n \n h31.split()\n h331 = h31.get_child(0)\n \n h133 = h331.make_twin()\n h36 = HalfEdge(v3, v6)\n h613 = HalfEdge(v6, h133.base())\n \n ncell_3 = Cell([h133, h36, h613])\n \n # No neighbors wrt shared edges\n self.assertIsNone(cell.get_neighbors(h31))\n self.assertIsNone(ncell_3.get_neighbors(h133))\n \n # Neighbors wrt vertices remain as they are.\n self.assertEqual(cell.get_neighbors(v3),[ncell_1, ncell_2])\n 
self.assertEqual(ncell_1.get_neighbors(v3), [ncell_2, cell])\n self.assertEqual(ncell_2.get_neighbors(v3), [cell, ncell_1])\n self.assertEqual(ncell_3.get_neighbors(v3), [])\n \n \nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testNode']\n unittest.main()", "import unittest\nfrom gmrf import SPDMatrix\nfrom gmrf import diagonal_inverse\nfrom gmrf import modchol_ldlt \nimport numpy as np\nimport scipy.linalg as linalg\nimport scipy.sparse as sp\nfrom sklearn.datasets import make_sparse_spd_matrix\n\ndef test_matrix(n, sparse=False, d=-0.5):\n \"\"\"\n Returns symmetric matrices on which to test algorithms\n \n Inputs:\n \n n: int, matrix size\n \n sparse: bool (False), sparsity\n \n rank: str/int, if 'full', then rank=n, otherwise rank=r in {1,2,...,n}.\n \n Output:\n \n A: double, symmetric positive definite matrix with specified rank\n (hopefully) and sparsity.\n \"\"\" \n if sparse:\n #\n # Sparse matrix \n # \n A = make_sparse_spd_matrix(dim=n, alpha=0.95, norm_diag=False,\n smallest_coef=.1, largest_coef=.9);\n A = sp.csc_matrix(A)\n else:\n #\n # Full matrix\n #\n X = np.random.rand(n, n)\n X = X + X.T\n U, dummy, V = linalg.svd(np.dot(X.T, X))\n A = np.dot(np.dot(U, d + np.diag(np.random.rand(n))), V)\n \n return A\n \n\n\nclass TestSPDMatrix(unittest.TestCase):\n \"\"\"\n Test the storage, inversion and factorization of matrices of the \n form M^{-1} K\n \"\"\"\n def test_modchol_ldlt(self):\n # Indefinite Matrix\n K = np.array([[1, 1, 0, 1], \n [1, 1, 1, 0], \n [0, 1, 1, 1], \n [1, 0, 1, 1]])\n \n # Compute modified Cholesky decomposition\n L, D, dummy, D0 = modchol_ldlt(K)\n \n self.assertTrue(np.allclose(L.dot(D0.dot(L.T)),K))\n self.assertFalse(np.allclose(D0,D))\n \n \n def test_constructor(self):\n n = 20\n for sparse in [True, False]:\n # Generate test matrix\n A = test_matrix(n, sparse)\n K = SPDMatrix(A)\n \n # Check size function\n self.assertEqual(K.size(),n)\n \n # Check sparsity function\n self.assertEqual(K.issparse(),sparse)\n \n # Check get_matrix function\n if sparse:\n self.assertTrue(np.allclose(K.get_matrix().toarray(), A.toarray()))\n else:\n self.assertTrue(np.allclose(K.get_matrix(), A))\n \n \n def test_diag_inverse(self):\n #\n # Compute the pseudo-inverse of a diagonal matrix\n # \n I = np.eye(10)\n I[-1,-1] = 0 \n J = diagonal_inverse(np.diag(I))\n JJ = diagonal_inverse(I)\n self.assertTrue(np.allclose(I, J))\n self.assertTrue(np.allclose(I,JJ))\n\n \n def test_chol_types(self):\n \n n = 20 \n for sparsity in [False, True]:\n # Generate random SPD matrix\n A = test_matrix(n, sparsity)\n K = SPDMatrix(A)\n \n # Compute the Cholesky decomposition\n K.chol_decomp()\n \n # Check that the right algorithm was used.\n if sp.issparse(A): \n A = A.toarray()\n \n # Check that matrix is full rank\n rank = np.linalg.matrix_rank(A) \n self.assertEqual(rank, n)\n \n chol_type = 'sparse' if sparsity else 'full' \n self.assertEqual(chol_type, K.chol_type())\n \n \n def test_get_chol_decomp(self):\n \"\"\"\n Return L,\n \"\"\"\n n = 10\n for sparsity in [False, True]:\n #\n # Cycle through sparsity\n #\n \n # Generate random SPD matrix\n A = test_matrix(n, sparsity)\n K = SPDMatrix(A)\n \n # Compute the Cholesky decomposition\n K.chol_decomp()\n\n # Check that the decomposition reproduces the matrix\n if K.chol_type()=='full':\n # Get Cholesky factor\n L, D, P, D0 = K.get_chol_decomp()\n \n if not np.allclose(D,D0):\n # Indefinite matrix - change to modified matrix\n A = L.dot(D.dot(L.T))\n \n # Check reconstruction\n 
self.assertTrue(np.allclose(L.dot(D.dot(L.T)),A))\n \n\n # Check that P*L is lower triangular with ones on diagonal\n self.assertTrue(np.allclose(1, np.diagonal(P.dot(L))))\n self.assertTrue(np.allclose(0, linalg.triu(P.dot(L),1)))\n \n elif K.chol_type()=='sparse':\n # Get Cholesky factor\n L = K.get_chol_decomp()\n P = L.P()\n LL = L.L()\n \n # Build permutation matrix\n I = sp.diags([1],0, shape=(n,n), format='csc')\n PP = I[P,:]\n \n # Compute P'L\n LL = PP.T.dot(LL)\n \n # Check reconstruction LL' = PAP'\n self.assertTrue(np.allclose(LL.dot(LL.T).toarray(),\n A.toarray())) \n \n \n def test_chol_sqrt(self):\n \"\"\"\n Return R*b, where K = R*R'\n \"\"\"\n n = 20\n b = np.random.rand(n)\n for sparsity in [False, True]:\n #\n # Cycle through sparsity\n #\n \n # Generate random SPD matrix\n A = test_matrix(n, sparsity)\n K = SPDMatrix(A)\n \n # Compute the Cholesky decomposition\n K.chol_decomp()\n \n # Compute R*b\n if K.chol_type()=='full':\n \n # Reconstruct (modified) matrix\n B = K.chol_reconstruct()\n \n # Identity matrix\n I = np.eye(n)\n \n # Compute R*I\n z = K.chol_sqrt(I)\n \n # Check that R*R' = B\n self.assertTrue(np.allclose(z.dot(z.T),B))\n \n # Compute R'*b \n b = np.random.rand(n)\n z = K.chol_sqrt(b,transpose=True)\n \n # Check that b'Ab = (Rb)'(Rb)\n self.assertTrue(np.allclose(z.dot(z),b.T.dot(B.dot(b))))\n \n \n elif K.chol_type()=='sparse':\n # Identity matrix\n I = np.eye(n)\n \n # Compute R*I\n z = K.chol_sqrt(I)\n \n # Check that RR' = A\n # print(np.linalg.norm(z.dot(z.T) - A.toarray()))\n self.assertTrue(np.allclose(z.dot(z.T),A.toarray()))\n \n # Compute R'*b\n b = np.random.rand(n)\n z = K.chol_sqrt(b, transpose=True)\n \n # Check that b'Ab = (Rb)'(Rb)\n self.assertTrue(np.allclose(z.dot(z),b.T.dot(A.dot(b))))\n \n \n\n def test_sqrt_solve(self):\n n = 20\n \n for sparsity in [False, True]:\n #\n # Cycle through sparsity\n #\n \n # Generate random SPD matrix\n A = test_matrix(n, sparsity)\n K = SPDMatrix(A)\n \n # Compute the Cholesky decomposition\n K.chol_decomp()\n \n # Random vector\n x = np.random.rand(n)\n \n for transpose in [False, True]:\n # Compute b = Rx (or R'x)\n b = K.chol_sqrt(x, transpose=transpose) \n \n # Solve for x \n xx = K.chol_sqrt_solve(b, transpose=transpose)\n \n # Check that we've recovered the original x \n self.assertTrue(np.allclose(xx,x))\n \n \n def test_chol_solve(self):\n n = 100\n \n for sparsity in [False, True]:\n #\n # Cycle through sparsity\n #\n \n # Generate random SPD matrix\n A = test_matrix(n, sparsity)\n K = SPDMatrix(A)\n \n # Compute the Cholesky decomposition\n K.chol_decomp()\n \n # Use modified A if necessary\n A = K.chol_reconstruct()\n \n # Generate random solution \n x = np.random.rand(n)\n b = A.dot(x)\n \n # Solve using Cholesky decomposition\n xx = K.chol_solve(b)\n \n # Check accuracy\n self.assertTrue(np.allclose(xx,x))\n \n \n \n def test_eig(self):\n # Form SPD matrix\n n = 20\n for sparse in range(False, True):\n A = test_matrix(n,sparse,1)\n K = SPDMatrix(A)\n \n \n # Compute eigendecomposition\n K.compute_eig_decomp()\n \n # Check reconstruction\n d, V = K.get_eig_decomp()\n AA = V.dot(np.diag(d).dot(V.T))\n A = A.toarray() if sparse else A\n self.assertTrue(np.allclose(AA,A))\n \n\n def test_eigsolve(self):\n n = 20\n for sparse in range(False, True):\n # Test matrix\n A = test_matrix(n, sparse)\n K = SPDMatrix(A)\n \n # Compute eigendecomposition\n K.compute_eig_decomp()\n \n # Reconstruct\n A = K.eig_reconstruct()\n \n # Make up system\n x = np.random.rand(K.size())\n b = A.dot(x)\n \n # Solve 
it\n xx = K.eig_solve(b)\n xxx = np.linalg.solve(A,b)\n \n # Check \n self.assertTrue(np.allclose(xx,x))\n self.assertTrue(np.allclose(xxx,x))\n \n \n def test_eig_sqrt(self):\n n = 20\n for sparse in range(False, True):\n # Test matrix\n A = test_matrix(n, sparse)\n K = SPDMatrix(A)\n \n # Compute eigendecomposition\n K.compute_eig_decomp() \n \n B = K.eig_reconstruct()\n \n #\n # Test Rx\n # \n \n # Identity matrix\n I = np.eye(n)\n \n # Compute R*I\n z = K.eig_sqrt(I)\n \n # Check that R*R' = B\n self.assertTrue(np.allclose(z.dot(z.T),B))\n \n #\n # Compute R'*b\n # \n b = np.random.rand(n)\n z = K.eig_sqrt(b,transpose=True)\n \n # Check that b'Ab = (Rb)'(Rb)\n self.assertTrue(np.allclose(z.dot(z),b.T.dot(B.dot(b))))\n\n \n def test_eig_sqrt_solve(self):\n n = 20\n \n for sparsity in [False, True]:\n #\n # Cycle through sparsity\n #\n \n # Generate random SPD matrix\n A = test_matrix(n, sparsity)\n K = SPDMatrix(A)\n \n # Compute the Eigen decomposition\n K.compute_eig_decomp()\n \n # Random vector\n x = np.random.rand(n)\n \n for transpose in [False, True]:\n # Compute b = Rx (or R'x)\n b = K.eig_sqrt(x, transpose=transpose) \n \n # Solve for x \n xx = K.eig_sqrt_solve(b, transpose=transpose)\n \n # Check that we've recovered the original x \n self.assertTrue(np.allclose(xx,x))\n \n \n \n def test_scalings(self):\n pass " ]
[ [ "numpy.array", "numpy.ones", "numpy.random.rand" ], [ "scipy.sparse.issparse", "numpy.array", "numpy.dot", "numpy.random.rand", "numpy.linalg.matrix_rank", "scipy.sparse.diags", "scipy.sparse.csc_matrix", "numpy.eye", "numpy.allclose", "sklearn.datasets.make_sparse_spd_matrix", "numpy.linalg.solve", "numpy.diag" ] ]
mepear/flow
[ "4fc6ceaf64ca522b5a5c4104a3098b20cf207dd4" ]
[ "train/myppo/train_ppo.py" ]
[ "import copy\nimport glob\nimport os\nimport sys\nimport time\nfrom collections import deque\nimport random\nfrom functools import partial\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.distributed import rpc\nimport torch.multiprocessing as mp\n\nimport sumolib\n\nfrom .a2c_ppo_acktr.arguments import get_args\nfrom .a2c_ppo_acktr.envs import make_vec_envs\nfrom .a2c_ppo_acktr.model import Policy\nfrom .a2c_ppo_acktr.storage import RolloutStorage\nfrom .a2c_ppo_acktr.trainer import Trainer\nfrom .evaluation import evaluate\n\ndef train(rank, args, flow_params=None):\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = str(args.master_port)\n if rank == 0:\n rpc_opt = rpc.TensorPipeRpcBackendOptions(num_worker_threads=\\\n max(16, args.num_splits * args.num_actors), rpc_timeout=500)\n rpc.init_rpc('agent', rank=rank, world_size=args.num_actors + 1, rpc_backend_options=rpc_opt)\n\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n # envs = make_vec_envs(args.env_name, args.seed, 1, \\\n # args.gamma, save_path, device, False, \\\n # port=args.port, popart_reward=args.popart_reward, \\\n # flow_params=flow_params, reward_scale=args.reward_scale, \\\n # verbose=args.verbose)\n\n trainer = Trainer(args, flow_params)\n trainer.run()\n else:\n rpc_opt = rpc.TensorPipeRpcBackendOptions(rpc_timeout=500)\n rpc.init_rpc('actor_' + str(rank - 1), rank=rank, world_size=args.num_actors + 1, rpc_backend_options=rpc_opt)\n rpc.shutdown()\n\n\ndef train_ppo(flow_params=None):\n mp.set_start_method('spawn')\n args = get_args(sys.argv[2:])\n args.master_port = sumolib.miscutils.getFreeSocketPort()\n procs = []\n for i in range(args.num_actors + 1):\n p = mp.Process(target=train, args=(i, args, flow_params))\n p.start()\n procs.append(p)\n for p in procs:\n p.join()\n\n\nif __name__ == \"__main__\":\n train_ppo()\n" ]
[ [ "torch.multiprocessing.Process", "torch.cuda.manual_seed_all", "numpy.random.seed", "torch.distributed.rpc.TensorPipeRpcBackendOptions", "torch.multiprocessing.set_start_method", "torch.manual_seed", "torch.distributed.rpc.init_rpc", "torch.cuda.is_available", "torch.distributed.rpc.shutdown" ] ]
sumit-158/sktime
[ "e74c32ac6493291dacae36dbfc979de1eefeffac" ]
[ "sktime/datatypes/_convert.py" ]
[ "# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Machine type converters for scitypes.\n\nExports\n-------\nconvert_to(obj, to_type: str, as_scitype: str, store=None)\n converts object \"obj\" to type \"to_type\", considerd as \"as_scitype\"\n\nconvert(obj, from_type: str, to_type: str, as_scitype: str, store=None)\n same as convert_to, without automatic identification of \"from_type\"\n\nmtype(obj, as_scitype: str)\n returns \"from_type\" of obj, considered as \"as_scitype\"\n---\n\nFunction signature of convert\n\nParameters\n----------\nobj : object to convert - any type, should comply with mtype spec for as_scitype\nfrom_type : str - the type to convert \"obj\" to, a valid mtype string\nto_type : str - the type to convert \"obj\" to, a valid mtype string\nas_scitype : str - name of scitype the object \"obj\" is considered as\nstore : reference of storage for lossy conversions, default=None (no store)\n\nReturns\n-------\nconverted_obj : to_type - object obj converted to to_type\n\n---\n\nFunction signature of convert_to\n\nParameters\n----------\nobj : object to convert - any type, should comply with mtype spec for as_scitype\nto_type : str - the type to convert \"obj\" to, a valid mtype string\nas_scitype : str - name of scitype the object \"obj\" is considered as\nstore : reference of storage for lossy conversions, default=None (no store)\n\nReturns\n-------\nconverted_obj : to_type - object obj converted to to_type\n\n---\n\nFunction signature of mtype\n\nParameters\n----------\nobj : object to convert - any type, should comply with mtype spec for as_scitype\nas_scitype : str - name of scitype the object \"obj\" is considered as\n\nReturns\n-------\nstr - the type to convert \"obj\" to, a valid mtype string\n or None, if obj is None\n\"\"\"\n\n__author__ = [\"fkiraly\"]\n\n__all__ = [\n \"convert\",\n \"convert_to\",\n]\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.datatypes._check import mtype as infer_mtype\nfrom sktime.datatypes._panel import convert_dict_Panel\nfrom sktime.datatypes._registry import mtype_to_scitype\nfrom sktime.datatypes._series import convert_dict_Series\nfrom sktime.datatypes._table import convert_dict_Table\n\n# pool convert_dict-s and infer_mtype_dict-s\nconvert_dict = dict()\nconvert_dict.update(convert_dict_Series)\nconvert_dict.update(convert_dict_Panel)\nconvert_dict.update(convert_dict_Table)\n\n\ndef convert(obj, from_type: str, to_type: str, as_scitype: str = None, store=None):\n \"\"\"Convert objects between different machine representations, subject to scitype.\n\n Parameters\n ----------\n obj : object to convert - any type, should comply with mtype spec for as_scitype\n from_type : str - the type to convert \"obj\" to, a valid mtype string\n to_type : str - the type to convert \"obj\" to, a valid mtype string\n as_scitype : str, optional - name of scitype the object \"obj\" is considered as\n default = inferred from from_type\n store : reference of storage for lossy conversions, default=None (no store)\n\n Returns\n -------\n converted_obj : to_type - object obj converted to to_type\n if obj was None, returns None\n\n Raises\n ------\n KeyError if conversion is not implemented\n \"\"\"\n if obj is None:\n return None\n\n if not isinstance(to_type, str):\n raise TypeError(\"to_type must be a str\")\n if not isinstance(from_type, str):\n raise TypeError(\"from_type must be a str\")\n if as_scitype is None:\n as_scitype = mtype_to_scitype(to_type)\n elif not isinstance(as_scitype, 
str):\n raise TypeError(\"as_scitype must be str or None\")\n\n key = (from_type, to_type, as_scitype)\n\n if key not in convert_dict.keys():\n raise NotImplementedError(\n \"no conversion defined from type \" + str(from_type) + \" to \" + str(to_type)\n )\n\n converted_obj = convert_dict[key](obj, store=store)\n\n return converted_obj\n\n\n# conversion based on queriable type to specified target\ndef convert_to(obj, to_type: str, as_scitype: str = None, store=None):\n \"\"\"Convert object to a different machine representation, subject to scitype.\n\n Parameters\n ----------\n obj : object to convert - any type, should comply with mtype spec for as_scitype\n to_type : str - the type to convert \"obj\" to, a valid mtype string\n or list - admissible types for conversion to\n as_scitype : str, optional - name of scitype the object \"obj\" is considered as\n default = inferred from mtype of obj, which is in turn inferred internally\n store : reference of storage for lossy conversions, default=None (no store)\n\n Returns\n -------\n converted_obj : to_type - object obj converted to to_type, if to_type is str\n if to_type is list, converted to to_type[0],\n unless from_type in to_type, in this case converted_obj=obj\n if obj was None, returns None\n\n Raises\n ------\n TypeError if machine type of input \"obj\" is not recognized\n KeyError if conversion is not implemented\n \"\"\"\n if obj is None:\n return None\n\n if isinstance(to_type, list):\n if not np.all(isinstance(x, str) for x in to_type):\n raise TypeError(\"to_type must be a str or list of str\")\n elif not isinstance(to_type, str):\n raise TypeError(\"to_type must be a str or list of str\")\n\n if as_scitype is None:\n if isinstance(to_type, str):\n as_scitype = mtype_to_scitype(to_type)\n else:\n as_scitype = mtype_to_scitype(to_type[0])\n elif not isinstance(as_scitype, str):\n raise TypeError(\"as_scitype must be a str or None\")\n\n from_type = infer_mtype(obj=obj, as_scitype=as_scitype)\n\n # if to_type is a list:\n if isinstance(to_type, list):\n # no conversion of from_type is in the list\n if from_type in to_type:\n to_type = from_type\n # otherwise convert to first element\n else:\n to_type = to_type[0]\n\n converted_obj = convert(\n obj=obj,\n from_type=from_type,\n to_type=to_type,\n as_scitype=as_scitype,\n store=store,\n )\n\n return converted_obj\n\n\ndef _conversions_defined(scitype: str):\n \"\"\"Return an indicator matrix which conversions are defined for scitype.\n\n Parameters\n ----------\n scitype: str - name of scitype for which conversions are queried\n\n Returns\n -------\n conv_df: pd.DataFrame, columns and index is list of mtypes for scitype\n entry of row i, col j is 1 if conversion from i to j is defined,\n 0 if conversion from i to j is not defined\n \"\"\"\n pairs = [(x[0], x[1]) for x in list(convert_dict.keys()) if x[2] == scitype]\n cols0 = set([x[0] for x in list(convert_dict.keys()) if x[2] == scitype])\n cols1 = set([x[1] for x in list(convert_dict.keys()) if x[2] == scitype])\n cols = sorted(list(cols0.union(cols1)))\n\n mat = np.zeros((len(cols), len(cols)), dtype=int)\n nkeys = len(cols)\n for i in range(nkeys):\n for j in range(nkeys):\n if (cols[i], cols[j]) in pairs:\n mat[i, j] = 1\n\n conv_df = pd.DataFrame(mat, index=cols, columns=cols)\n\n return conv_df\n" ]
[ [ "pandas.DataFrame" ] ]
gohsyi/tianshou
[ "9c0cd45a3bc6147ea94ff95f49df3e950ce02958" ]
[ "tianshou/utils/moving_average.py" ]
[ "import torch\nimport numpy as np\n\n\nclass MovAvg(object):\n \"\"\"Class for moving average. Usage:\n ::\n\n >>> stat = MovAvg(size=66)\n >>> stat.add(torch.tensor(5))\n 5.0\n >>> stat.add(float('inf')) # which will not add to stat\n 5.0\n >>> stat.add([6, 7, 8])\n 6.5\n >>> stat.get()\n 6.5\n >>> print(f'{stat.mean():.2f}±{stat.std():.2f}')\n 6.50±1.12\n \"\"\"\n def __init__(self, size=100):\n super().__init__()\n self.size = size\n self.cache = []\n\n def add(self, x):\n \"\"\"Add a scalar into :class:`MovAvg`. You can add ``torch.Tensor`` with\n only one element, a python scalar, or a list of python scalar. It will\n automatically exclude the infinity and NaN.\n \"\"\"\n if isinstance(x, torch.Tensor):\n x = x.item()\n if isinstance(x, list):\n for _ in x:\n if _ not in [np.inf, np.nan, -np.inf]:\n self.cache.append(_)\n elif x != np.inf:\n self.cache.append(x)\n if self.size > 0 and len(self.cache) > self.size:\n self.cache = self.cache[-self.size:]\n return self.get()\n\n def get(self):\n \"\"\"Get the average.\"\"\"\n if len(self.cache) == 0:\n return 0\n return np.mean(self.cache)\n\n def mean(self):\n \"\"\"Get the average. Same as :meth:`get`.\"\"\"\n return self.get()\n\n def std(self):\n \"\"\"Get the standard deviation.\"\"\"\n if len(self.cache) == 0:\n return 0\n return np.std(self.cache)\n" ]
[ [ "numpy.std", "numpy.mean" ] ]
DilipA/rlpyt
[ "edfd46484c56a47b4671006a16a642e6808da393" ]
[ "rlpyt/spaces/float_box.py" ]
[ "\nimport numpy as np\n\nfrom rlpyt.spaces.base import Space\n\n\nclass FloatBox(Space):\n \"\"\"A box in R^n, with specifiable bounds and dtype.\"\"\"\n\n def __init__(self, low, high, shape=None, null_value=0., dtype=\"float32\"):\n \"\"\"\n Two kinds of valid input:\n # low and high are scalars, and shape is provided\n Box(-1.0, 1.0, (3,4))\n # low and high are arrays of the same shape\n Box(np.array([-1.0,-2.0]), np.array([2.0,4.0]))\n \"\"\"\n self.dtype = np.dtype(dtype)\n assert np.issubdtype(self.dtype, np.floating)\n if shape is None:\n self.low = np.asarray(low, dtype=dtype)\n self.high = np.asarray(high, dtype=dtype)\n assert self.low.shape == self.high.shape\n else:\n assert np.isscalar(low) and np.isscalar(high)\n self.low = np.asarray(low + np.zeros(shape), dtype=dtype)\n self.high = np.asarray(high + np.zeros(shape), dtype=dtype)\n self._null_value = null_value\n\n def sample(self, size=None, null=False):\n if size is None:\n size = ()\n elif isinstance(size, int):\n size = (size,)\n\n if null:\n sample = self._null_value * np.ones(size + self.shape, dtype=self.dtype)\n else:\n sample = np.asarray(np.random.uniform(low=self.low, high=self.high,\n size=size + self.shape), dtype=self.dtype)\n return sample\n\n @property\n def shape(self):\n return self.low.shape\n\n @property\n def bounds(self):\n return self.low, self.high\n\n @property\n def null_value(self):\n return self._null_value\n\n def __repr__(self):\n return f\"FloatBox{self.shape}\"\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.ones", "numpy.random.uniform", "numpy.isscalar", "numpy.issubdtype", "numpy.dtype" ] ]
Sargastico/insp3ct0r
[ "3363fbcf4058054b1437620ee1deb640d272d0bf" ]
[ "core/processing/subtraction.py" ]
[ "from skimage.metrics import structural_similarity\nimport cv2 as cv\nimport numpy as np\n\ndef differenceInspection(baseimg, tocompareimg, showResults=False):\n\n height, width, _ = baseimg.shape\n\n tocompareimg = cv.resize(tocompareimg, (width, height))\n\n # Convert images to grayscale\n before_gray = cv.cvtColor(baseimg, cv.COLOR_BGR2GRAY)\n after_gray = cv.cvtColor(tocompareimg, cv.COLOR_BGR2GRAY)\n\n # Compute SSIM between two images\n (score, diff) = structural_similarity(before_gray, after_gray, full=True)\n\n\n # The diff image contains the actual image differences between the two images\n # and is represented as a floating point data type in the range [0,1]\n # so we must convert the array to 8-bit unsigned integers in the range\n # [0,255] baseimg we can use it with OpenCV\n diff = (diff * 255).astype(\"uint8\")\n\n # Threshold the difference image, followed by finding contours to\n # obtain the regions of the two input images that differ\n thresh = cv.threshold(diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]\n contours = cv.findContours(thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n\n mask = np.zeros(baseimg.shape, dtype='uint8')\n filled_after = tocompareimg.copy()\n\n for c in contours:\n area = cv.contourArea(c)\n if area > 40:\n x, y, w, h = cv.boundingRect(c)\n cv.rectangle(baseimg, (x, y), (x + w, y + h), (36, 255, 12), 2)\n cv.rectangle(tocompareimg, (x, y), (x + w, y + h), (36, 255, 12), 2)\n cv.drawContours(mask, [c], 0, (0, 255, 0), -1)\n cv.drawContours(filled_after, [c], 0, (0, 255, 0), -1)\n\n if showResults:\n\n cv.imshow('diff', diff)\n cv.imshow('mask', mask)\n cv.imshow('filled after', filled_after)\n\n return score, diff, mask, filled_after" ]
[ [ "numpy.zeros" ] ]
davidstutz/bpy-visualization-utils
[ "9aec34f4b58d83992c3c3ce7eb6369d9055760bc" ]
[ "write_binvox.py" ]
[ "import numpy as np\nimport argparse\nimport binvox_rw\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Write an example BINVOX file.')\n parser.add_argument('output', type=str, help='BINVOX file.')\n\n args = parser.parse_args()\n\n volume = np.zeros((32, 32, 32))\n volume[10:22, 10:22, 10:22] = 1\n\n model = binvox_rw.Voxels(volume > 0.5, volume.shape, (0, 0, 0), 1)\n with open(args.output, 'w') as fp:\n model.write(fp)\n print('Wote %s.' % args.output)" ]
[ [ "numpy.zeros" ] ]
MisterZhouZhou/pythonLearn
[ "8933c7a6d444d3d86a173984e6cf4c08dbf84039" ]
[ "opencv_demo/face_action/face_direction.py" ]
[ "#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\nimport dlib\nimport time\nimport math\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\nPOINTS_NUM_LANDMARK = 68\n\n\n# 获取最大的人脸\ndef _largest_face(dets):\n if len(dets) == 1:\n return 0\n\n face_areas = [(det.right() - det.left()) * (det.bottom() - det.top()) for det in dets]\n\n largest_area = face_areas[0]\n largest_index = 0\n for index in range(1, len(dets)):\n if face_areas[index] > largest_area:\n largest_index = index\n largest_area = face_areas[index]\n\n print(\"largest_face index is {} in {} faces\".format(largest_index, len(dets)))\n\n return largest_index\n\n\n# 从dlib的检测结果抽取姿态估计需要的点坐标\ndef get_image_points_from_landmark_shape(landmark_shape):\n if landmark_shape.num_parts != POINTS_NUM_LANDMARK:\n print(\"ERROR:landmark_shape.num_parts-{}\".format(landmark_shape.num_parts))\n return -1, None\n\n # 2D image points. If you change the image, you need to change vector\n image_points = np.array([\n (landmark_shape.part(30).x, landmark_shape.part(30).y), # Nose tip\n (landmark_shape.part(8).x, landmark_shape.part(8).y), # Chin\n (landmark_shape.part(36).x, landmark_shape.part(36).y), # Left eye left corner\n (landmark_shape.part(45).x, landmark_shape.part(45).y), # Right eye right corne\n (landmark_shape.part(48).x, landmark_shape.part(48).y), # Left Mouth corner\n (landmark_shape.part(54).x, landmark_shape.part(54).y) # Right mouth corner\n ], dtype=\"double\")\n\n return 0, image_points\n\n\n# 用dlib检测关键点,返回姿态估计需要的几个点坐标\ndef get_image_points(img):\n # gray = cv2.cvtColor( img, cv2.COLOR_BGR2GRAY ) # 图片调整为灰色\n dets = detector(img, 0)\n\n if 0 == len(dets):\n print(\"ERROR: found no face\")\n return -1, None\n largest_index = _largest_face(dets)\n face_rectangle = dets[largest_index]\n\n landmark_shape = predictor(img, face_rectangle)\n\n return get_image_points_from_landmark_shape(landmark_shape)\n\n\n# 获取旋转向量和平移向量\ndef get_pose_estimation(img_size, image_points):\n # 3D model points.\n model_points = np.array([\n (0.0, 0.0, 0.0), # Nose tip\n (0.0, -330.0, -65.0), # Chin\n (-225.0, 170.0, -135.0), # Left eye left corner\n (225.0, 170.0, -135.0), # Right eye right corne\n (-150.0, -150.0, -125.0), # Left Mouth corner\n (150.0, -150.0, -125.0) # Right mouth corner\n\n ])\n\n # Camera internals\n\n focal_length = img_size[1]\n center = (img_size[1] / 2, img_size[0] / 2)\n camera_matrix = np.array(\n [[focal_length, 0, center[0]],\n [0, focal_length, center[1]],\n [0, 0, 1]], dtype=\"double\"\n )\n\n print(\"Camera Matrix :{}\".format(camera_matrix))\n\n dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion\n (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,\n dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)\n\n print(\"Rotation Vector:\\n {}\".format(rotation_vector))\n print(\"Translation Vector:\\n {}\".format(translation_vector))\n return success, rotation_vector, translation_vector, camera_matrix, dist_coeffs\n\n\n# 从旋转向量转换为欧拉角\ndef get_euler_angle(rotation_vector):\n # calculate rotation angles\n theta = cv2.norm(rotation_vector, cv2.NORM_L2)\n\n # transformed to quaterniond\n w = math.cos(theta / 2)\n x = math.sin(theta / 2) * rotation_vector[0][0] / theta\n y = math.sin(theta / 2) * rotation_vector[1][0] / theta\n z = math.sin(theta / 2) * rotation_vector[2][0] / theta\n\n ysqr = y * y\n # pitch (x-axis rotation)\n t0 = 2.0 * (w * x + y * z)\n t1 = 1.0 - 2.0 * (x * x + ysqr)\n print('t0:{}, 
t1:{}'.format(t0, t1))\n pitch = math.atan2(t0, t1)\n\n # yaw (y-axis rotation)\n t2 = 2.0 * (w * y - z * x)\n if t2 > 1.0:\n t2 = 1.0\n if t2 < -1.0:\n t2 = -1.0\n yaw = math.asin(t2)\n\n # roll (z-axis rotation)\n t3 = 2.0 * (w * z + x * y)\n t4 = 1.0 - 2.0 * (ysqr + z * z)\n roll = math.atan2(t3, t4)\n\n print('pitch:{}, yaw:{}, roll:{}'.format(pitch, yaw, roll))\n\n # 单位转换:将弧度转换为度\n Y = int((pitch / math.pi) * 180)\n X = int((yaw / math.pi) * 180)\n Z = int((roll / math.pi) * 180)\n\n return 0, Y, X, Z\n\n\ndef get_pose_estimation_in_euler_angle(landmark_shape, im_szie):\n try:\n ret, image_points = get_image_points_from_landmark_shape(landmark_shape)\n if ret != 0:\n print('get_image_points failed')\n return -1, None, None, None\n\n ret, rotation_vector, translation_vector, camera_matrix, dist_coeffs = get_pose_estimation(im_szie,\n image_points)\n if ret != True:\n print('get_pose_estimation failed')\n return -1, None, None, None\n\n ret, pitch, yaw, roll = get_euler_angle(rotation_vector)\n if ret != 0:\n print('get_euler_angle failed')\n return -1, None, None, None\n\n euler_angle_str = 'Y:{}, X:{}, Z:{}'.format(pitch, yaw, roll)\n print(euler_angle_str)\n return 0, pitch, yaw, roll\n\n except Exception as e:\n print('get_pose_estimation_in_euler_angle exception:{}'.format(e))\n return -1, None, None, None\n\n\nif __name__ == '__main__':\n\n # rtsp://admin:[email protected]:554\n cap = cv2.VideoCapture(0)\n while (cap.isOpened()):\n start_time = time.time()\n\n # Read Image\n ret, im = cap.read()\n if ret != True:\n print('read frame failed')\n continue\n size = im.shape\n\n if size[0] > 700:\n h = size[0] / 3\n w = size[1] / 3\n im = cv2.resize(im, (int(w), int(h)), interpolation=cv2.INTER_CUBIC)\n size = im.shape\n\n ret, image_points = get_image_points(im)\n if ret != 0:\n print('get_image_points failed')\n continue\n\n ret, rotation_vector, translation_vector, camera_matrix, dist_coeffs = get_pose_estimation(size, image_points)\n if ret != True:\n print('get_pose_estimation failed')\n continue\n used_time = time.time() - start_time\n print(\"used_time:{} sec\".format(round(used_time, 3)))\n\n ret, pitch, yaw, roll = get_euler_angle(rotation_vector)\n euler_angle_str = 'Y:{}, X:{}, Z:{}'.format(pitch, yaw, roll)\n print(euler_angle_str)\n\n # Project a 3D point (0, 0, 1000.0) onto the image plane.\n # We use this to draw a line sticking out of the nose\n\n (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector,\n translation_vector, camera_matrix, dist_coeffs)\n\n for p in image_points:\n cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)\n\n p1 = (int(image_points[0][0]), int(image_points[0][1]))\n p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))\n\n cv2.line(im, p1, p2, (255, 0, 0), 2)\n\n # Display image\n # cv2.putText( im, str(rotation_vector), (0, 100), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 1 )\n cv2.putText(im, euler_angle_str, (0, 120), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 1)\n cv2.imshow(\"Output\", im)\n cv2.waitKey(1)" ]
[ [ "numpy.array", "numpy.zeros" ] ]
kaczmarj/tensorflow
[ "35a015349bab8f2e3276d843427a4501e56d18b6" ]
[ "tensorflow/python/keras/layers/preprocessing/text_vectorization.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras text vectorization preprocessing layer.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport operator\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.engine.base_preprocessing_layer import Combiner\nfrom tensorflow.python.keras.engine.base_preprocessing_layer import CombinerPreprocessingLayer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_string_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops.ragged import ragged_functional_ops\nfrom tensorflow.python.ops.ragged import ragged_string_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.util import compat\n\nLOWER_AND_STRIP_PUNCTUATION = \"lower_and_strip_punctuation\"\n\nSPLIT_ON_WHITESPACE = \"whitespace\"\n\nTFIDF = \"tf-idf\"\nINT = \"int\"\nBINARY = \"binary\"\nCOUNT = \"count\"\n\n# This is an explicit regex of all the tokens that will be stripped if\n# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other\n# stripping, a Callable should be passed into the 'standardize' arg.\nDEFAULT_STRIP_REGEX = r'[!\"#$%&()\\*\\+,-\\./:;<=>?@\\[\\\\\\]^_`{|}~\\']'\n\n# The string tokens in the extracted vocabulary\n_VOCAB_NAME = \"vocab\"\n# The inverse-document-frequency weights\n_IDF_NAME = \"idf\"\n# The IDF data for the OOV token\n_OOV_IDF_NAME = \"oov_idf\"\n\n# The string tokens in the full vocabulary\n_ACCUMULATOR_VOCAB_NAME = \"vocab\"\n# The total counts of each token in the vocabulary\n_ACCUMULATOR_COUNTS_NAME = \"counts\"\n# The number of doccumeents / examples that each token appears in.\n_ACCUMULATOR_DOCUMENT_COUNTS = \"document_counts\"\n# The total number of documents / examples in the dataset.\n_ACCUMULATOR_NUM_DOCUMENTS = \"num_documents\"\n\n\nclass TextVectorization(CombinerPreprocessingLayer):\n \"\"\"Text vectorization layer.\n\n This layer has basic options for managing text in a Keras model. 
It\n transforms a batch of strings (one sample = one string) into either a list of\n token indices (one sample = 1D tensor of integer token indices) or a dense\n representation (one sample = 1D tensor of float values representing data about\n the sample's tokens).\n\n The processing of each sample contains the following steps:\n 1) standardize each sample (usually lowercasing + punctuation stripping)\n 2) split each sample into substrings (usually words)\n 3) recombine substrings into tokens (usually ngrams)\n 4) index tokens (associate a unique int value with each token)\n 5) transform each sample using this index, either into a vector of ints or\n a dense float vector.\n\n Some notes on passing Callables to customize splitting and normalization for\n this layer:\n 1) Any callable can be passed to this Layer, but if you want to serialize\n this object you should only pass functions that are registered Keras\n serializables (see `tf.keras.utils.register_keras_serializable` for more\n details).\n 2) When using a custom callable for `standardize`, the data recieved\n by the callable will be exactly as passed to this layer. The callable\n should return a tensor of the same shape as the input.\n 3) When using a custom callable for `split`, the data recieved by the\n callable will have the 1st dimension squeezed out - instead of\n `[[\"string to split\"], [\"another string to split\"]]`, the Callable will\n see `[\"string to split\", \"another string to split\"]`. The callable should\n return a Tensor with the first dimension containing the split tokens -\n in this example, we should see something like `[[\"string\", \"to\", \"split],\n [\"another\", \"string\", \"to\", \"split\"]]`. This makes the callable site\n natively compatible with `tf.strings.split()`.\n\n Attributes:\n max_tokens: The maximum size of the vocabulary for this layer. If None,\n there is no cap on the size of the vocabulary.\n standardize: Optional specification for standardization to apply to the\n input text. Values can be None (no standardization),\n 'lower_and_strip_punctuation' (lowercase and remove punctuation) or a\n Callable. Default is 'lower_and_strip_punctuation'.\n split: Optional specification for splitting the input text. Values can be\n None (no splitting), 'whitespace' (split on ASCII whitespace), or a\n Callable. The default is 'whitespace'.\n ngrams: Optional specification for ngrams to create from the possibly-split\n input text. Values can be None, an integer or tuple of integers; passing\n an integer will create ngrams up to that integer, and passing a tuple of\n integers will create ngrams for the specified values in the tuple. Passing\n None means that no ngrams will be created.\n output_mode: Optional specification for the output of the layer. Values can\n be \"int\", \"binary\", \"count\" or \"tf-idf\", configuring the layer as follows:\n \"int\": Outputs integer indices, one integer index per split string\n token.\n \"binary\": Outputs a single int array per batch, of either vocab_size or\n max_tokens size, containing 1s in all elements where the token mapped\n to that index exists at least once in the batch item.\n \"count\": As \"binary\", but the int array contains a count of the number\n of times the token at that index appeared in the batch item.\n \"tf-idf\": As \"binary\", but the TF-IDF algorithm is applied to find the\n value in each token slot.\n output_sequence_length: Only valid in INT mode. 
If set, the output will have\n its time dimension padded or truncated to exactly `output_sequence_length`\n values, resulting in a tensor of shape [batch_size,\n output_sequence_length] regardless of how many tokens resulted from the\n splitting step. Defaults to None.\n pad_to_max_tokens: Only valid in \"binary\", \"count\", and \"tf-idf\" modes. If\n True, the output will have its feature axis padded to `max_tokens` even if\n the number of unique tokens in the vocabulary is less than max_tokens,\n resulting in a tensor of shape [batch_size, max_tokens] regardless of\n vocabulary size. Defaults to True.\n\n Example:\n This example instantiates a TextVectorization layer that lowercases text,\n splits on whitespace, strips punctuation, and outputs integer vocab indices.\n ```\n max_features = 5000 # Maximum vocab size.\n max_len = 40 # Sequence length to pad the outputs to.\n\n # Create the layer.\n vectorize_layer = text_vectorization.TextVectorization(\n max_tokens=max_features,\n output_mode='int',\n output_sequence_length=max_len)\n\n # Now that the vocab layer has been created, call `adapt` on the text-only\n # dataset to create the vocabulary. You don't have to batch, but for large\n # datasets this means we're not keeping spare copies of the dataset in memory.\n vectorize_layer.adapt(text_dataset.batch(64))\n\n # Create the model that uses the vectorize text layer\n model = tf.keras.models.Sequential()\n\n # Start by creating an explicit input layer. It needs to have a shape of (1,)\n # (because we need to guarantee that there is exactly one string input per\n # batch), and the dtype needs to be 'string'.\n model.add(tf.keras.Input(shape=(1,), dtype=tf.string))\n\n # The first layer in our model is the vectorization layer. After this layer,\n # we have a tensor of shape (batch_size, max_len) containing vocab indices.\n model.add(vectorize_layer)\n\n # Next, we add a layer to map those vocab indices into a space of\n # dimensionality 'embedding_dims'. 
Note that we're using max_features+1 here,\n # since there's an OOV token that gets added to the vocabulary in\n # vectorize_layer.\n model.add(tf.keras.layers.Embedding(max_features+1, embedding_dims))\n\n # At this point, you have embedded float data representing your tokens, and\n # can add whatever other layers you need to create your model.\n ```\n \"\"\"\n # TODO(momernick): Add an examples section to the docstring.\n\n def __init__(self,\n max_tokens=None,\n standardize=LOWER_AND_STRIP_PUNCTUATION,\n split=SPLIT_ON_WHITESPACE,\n ngrams=None,\n output_mode=INT,\n output_sequence_length=None,\n pad_to_max_tokens=True,\n **kwargs):\n\n # This layer only applies to string processing, and so should only have\n # a dtype of 'string'.\n if \"dtype\" in kwargs and kwargs[\"dtype\"] != dtypes.string:\n raise ValueError(\"TextVectorization may only have a dtype of string.\")\n elif \"dtype\" not in kwargs:\n kwargs[\"dtype\"] = dtypes.string\n\n # 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)\n _validate_string_arg(\n standardize,\n allowable_strings=[LOWER_AND_STRIP_PUNCTUATION],\n arg_name=\"standardize\")\n\n # 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)\n _validate_string_arg(\n split, allowable_strings=[SPLIT_ON_WHITESPACE], arg_name=\"split\")\n\n # 'output_mode' must be one of (None, INT, COUNT, BINARY, TFIDF)\n _validate_string_arg(\n output_mode,\n allowable_strings=[INT, COUNT, BINARY, TFIDF],\n arg_name=\"output_mode\",\n allow_callables=False)\n\n # 'ngrams' must be one of (None, int, tuple(int))\n if not (ngrams is None or\n isinstance(ngrams, int) or\n isinstance(ngrams, tuple) and\n all(isinstance(item, int) for item in ngrams)):\n raise ValueError((\"`ngrams` must be None, an integer, or a tuple of \"\n \"integers. Got %s\") % (ngrams,))\n\n # 'output_sequence_length' must be one of (None, int) and is only\n # set if output_mode is INT.\n if (output_mode == INT and not (isinstance(output_sequence_length, int) or\n (output_sequence_length is None))):\n raise ValueError(\"`output_sequence_length` must be either None or an \"\n \"integer when `output_mode` is 'int'. \"\n \"Got %s\" % output_sequence_length)\n\n if output_mode != INT and output_sequence_length is not None:\n raise ValueError(\"`output_sequence_length` must not be set if \"\n \"`output_mode` is not 'int'.\")\n\n self._max_tokens = max_tokens\n\n # In INT mode, we have two reserved values (PAD and OOV). However, non-INT\n # modes don't have a PAD value, so we only need to reserve one value.\n self._reserved_values = 2 if output_mode == INT else 1\n\n # In INT mode, the zero value is reserved for padding (per Keras standard\n # padding approaches). In non-INT modes, there is no padding so we can set\n # the OOV value to zero instead of one.\n self._oov_value = 1 if output_mode == INT else 0\n\n # We always reduce the max token number by 1 to account for the OOV token\n # if it is set. 
The PAD marker isn't really a token (it's the absence of a\n # token) so we don't account for it here.\n self._max_vocab_size = max_tokens - 1 if max_tokens is not None else None\n\n self._standardize = standardize\n self._split = split\n self._ngrams_arg = ngrams\n if isinstance(ngrams, int):\n self._ngrams = tuple(range(1, ngrams + 1))\n else:\n self._ngrams = ngrams\n\n self._output_mode = output_mode\n self._output_sequence_length = output_sequence_length\n self._pad_to_max = pad_to_max_tokens\n self._has_vocab = False\n\n super(TextVectorization, self).__init__(\n combiner=_TextVectorizationCombiner(\n self._max_vocab_size, compute_idf=output_mode == TFIDF),\n **kwargs)\n\n self._table = lookup_ops.MutableHashTable(\n key_dtype=dtypes.string,\n value_dtype=dtypes.int64,\n default_value=self._oov_value,\n name=(self._name + \"_index_table\"))\n\n def fail(_):\n raise NotImplementedError(\n \"Saving is not yet supported for TextVectorization layers.\")\n self._table._list_extra_dependencies_for_serialization = fail # pylint: disable=protected-access\n\n self._add_trackable(self._table, trainable=False)\n\n # We are adding this here instead of in build() since it does not depend\n # on the input shape at all.\n if self._output_mode == TFIDF:\n # Create the TFIDF weight, but use a (None,) tensorshape. This creates\n # a 1D variable with arbitrary shape, which we can assign any weight to\n # so long as it has 1 dimension. In order to properly initialize this\n # weight in Keras, we need to provide a custom callable initializer which\n # does not depend on the shape of the weight (as all other initializers\n # do) since the weight is not known. Hence the lambda shape, dtype: [0].\n self._tf_idf_weights = self.add_weight(\n name=\"tfidf_data\",\n shape=tensor_shape.TensorShape((None,)),\n dtype=K.floatx(),\n trainable=False,\n initializer=lambda shape, dtype: [0])\n\n # These are V1/V2 shim points. There are V1 implementations in the V1 class.\n def _get_table_data(self):\n keys, values = self._table.export()\n return (keys.numpy(), values.numpy())\n\n def _get_table_size(self):\n return self._table.size().numpy()\n\n def _clear_table(self):\n keys, _ = self._table.export()\n self._table.remove(keys)\n self._has_vocab = False\n\n def _insert_table_data(self, keys, values):\n if len(values) != len(keys):\n raise RuntimeError(\"Size mismatch between values and key arrays. 
\"\n \"Keys had size %s, values had size %s.\" %\n (len(keys), len(values)))\n self._table.insert(keys, values)\n self._has_vocab = True\n\n def _to_numpy(self, preprocessed_data):\n \"\"\"Converts preprocessed inputs into numpy arrays.\"\"\"\n if isinstance(preprocessed_data, np.ndarray):\n return preprocessed_data\n return np.array(preprocessed_data.to_list())\n # End of V1/V2 shim points.\n\n def _assert_same_type(self, expected_type, values, value_name):\n if dtypes.as_dtype(expected_type) != dtypes.as_dtype(values.dtype):\n raise RuntimeError(\"Expected %s type %s, got %s\" %\n (value_name, expected_type, values.dtype))\n\n def _convert_to_ndarray(self, x):\n return np.array(x) if isinstance(x, (list, tuple)) else x\n\n def compute_output_shape(self, input_shape):\n if self._output_mode != INT:\n return tensor_shape.TensorShape([input_shape[0], self._max_tokens])\n\n if self._output_mode == INT and self._split is None:\n return input_shape\n\n if self._output_mode == INT and self._split is not None:\n input_shape = list(input_shape)\n input_shape[1] = self._output_sequence_length\n return tensor_shape.TensorShape(input_shape)\n\n def compute_output_signature(self, input_spec):\n output_shape = self.compute_output_shape(input_spec.shape.as_list())\n output_dtype = K.floatx() if self._output_mode == TFIDF else dtypes.int64\n return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)\n\n def adapt(self, data, reset_state=True):\n \"\"\"Fits the state of the preprocessing layer to the dataset.\n\n Overrides the default adapt method to apply relevant preprocessing to the\n inputs before passing to the combiner.\n\n Arguments:\n data: The data to train on. It can be passed either as a tf.data Dataset,\n or as a numpy array.\n reset_state: Optional argument specifying whether to clear the state of\n the layer at the start of the call to `adapt`. 
This must be True for\n this layer, which does not support repeated calls to `adapt`.\n \"\"\"\n if not reset_state:\n raise ValueError(\"TextVectorization does not support streaming adapts.\")\n\n # Build the layer explicitly with the original data shape instead of relying\n # on an implicit call to `build` in the base layer's `adapt`, since\n # preprocessing changes the input shape.\n if isinstance(data, np.ndarray):\n if data.ndim == 1:\n data = np.expand_dims(data, axis=-1)\n self.build(data.shape)\n preprocessed_inputs = self._to_numpy(self._preprocess(data))\n elif isinstance(data, dataset_ops.DatasetV2):\n # TODO(momernick): Replace this with a more V2-friendly API.\n shape = dataset_ops.get_legacy_output_shapes(data)\n if not isinstance(shape, tensor_shape.TensorShape):\n raise ValueError(\"The dataset passed to 'adapt' must contain a single \"\n \"tensor value.\")\n if shape.rank == 1:\n data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))\n self.build(dataset_ops.get_legacy_output_shapes(data))\n preprocessed_inputs = data.map(self._preprocess)\n else:\n raise ValueError(\n \"adapt() requires a Dataset or a Numpy array as input, got {}\".format(\n type(data)))\n super(TextVectorization, self).adapt(preprocessed_inputs, reset_state)\n\n def get_vocabulary(self):\n if not self._has_vocab:\n return []\n\n keys, values = self._get_table_data()\n # This is required because the MutableHashTable doesn't preserve insertion\n # order, but we rely on the order of the array to assign indices.\n return [x for _, x in sorted(zip(values, keys))]\n\n def get_config(self):\n config = {\n \"max_tokens\": self._max_tokens,\n \"standardize\": self._standardize,\n \"split\": self._split,\n \"ngrams\": self._ngrams_arg,\n \"output_mode\": self._output_mode,\n \"output_sequence_length\": self._output_sequence_length,\n \"pad_to_max_tokens\": self._pad_to_max,\n }\n base_config = super(TextVectorization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def set_vocabulary(self,\n vocab,\n df_data=None,\n oov_df_value=None,\n append=False):\n \"\"\"Sets vocabulary (and optionally document frequency) data for this layer.\n\n This method sets the vocabulary and DF data for this layer directly, instead\n of analyzing a dataset through 'adapt'. It should be used whenever the vocab\n (and optionally document frequency) information is already known. If\n vocabulary data is already present in the layer, this method will either\n replace it, if 'append' is set to False, or append to it (if 'append' is set\n to True).\n\n Arguments:\n vocab: An array of string tokens.\n df_data: An array of document frequency data. Only necessary if the layer\n output_mode is TFIDF.\n oov_df_value: The document frequency of the OOV token. Only necessary if\n output_mode is TFIDF. OOV data is optional when appending additional\n data in TFIDF mode; if an OOV value is supplied it will overwrite the\n existing OOV value.\n append: Whether to overwrite or append any existing vocabulary data.\n\n Raises:\n ValueError: If there are too many inputs, the inputs do not match, or\n input data is missing.\n \"\"\"\n total_vocab_size = len(vocab) + (self._get_table_size() if append else 0)\n if self._max_tokens is not None and total_vocab_size > self._max_vocab_size:\n raise ValueError(\n \"Attempted to set a vocabulary larger than the maximum vocab size. \"\n \"Passed vocab size is %s, max vocab size is %s. 
Note that the OOV \"\n \"token is automatically added to the number of tokens.\" %\n (total_vocab_size, self._max_vocab_size))\n\n # We're only _really_ appending if the table_size is nonzero. This is\n # important for some sanity checks in tfidf mode (specifically, checking if\n # oov_df_value is set or not) and handling existing tfidf weight data.\n append = append if self._get_table_size() > 0 else False\n\n if self._output_mode == TFIDF:\n if df_data is None:\n raise ValueError(\"df_data must be set if output_mode is TFIDF\")\n if len(vocab) != len(df_data):\n raise ValueError(\"df_data must be the same length as vocab. \"\n \"len(df_data) is %s, len(vocab) is %s\" %\n (len(vocab), len(df_data)))\n if not append and oov_df_value is None:\n raise ValueError(\"You must pass an oov_df_value the first time \"\n \"'set_vocabulary' is called when output_mode is \"\n \"TFIDF.\")\n else:\n if df_data is not None:\n raise ValueError(\"df_data should only be set if output_mode is TFIDF. \"\n \"output_mode is %s.\" % self._output_mode)\n\n start_index = self._reserved_values + (\n self._get_table_size() if append else 0)\n values = np.arange(start_index, len(vocab) + start_index, dtype=np.int64)\n\n vocab = self._convert_to_ndarray(vocab)\n self._assert_same_type(dtypes.string, vocab, \"vocab\")\n\n values = self._convert_to_ndarray(values)\n self._assert_same_type(dtypes.int64, values, \"values\")\n\n if self._output_mode == TFIDF:\n df_data = self._convert_to_ndarray(df_data)\n if append:\n # The existing IDF data is stored in a Keras weight, so we can get it\n # by calling K.get_value() on the weight object. Take the first\n # table_size+1 values in case we're padding the weight with zeros.\n existing_df_data = K.get_value(\n self._tf_idf_weights)[:self._get_table_size() + 1]\n df_data = np.append(existing_df_data, df_data, axis=0)\n # If we are appending and need to replace the OOV DF value, we can just\n # assign it over the existing OOV DF value at index 0 of the (already-\n # concatenated) DF value array.\n if oov_df_value is not None:\n df_data[0] = oov_df_value\n else:\n # If we are not appending (that is, we have only new data) we need to\n # insert the OOV value to the front of the array. (This is an append to\n # the head, not a replacement of the zeroth value.)\n if not isinstance(oov_df_value, np.ndarray):\n oov_df_value = np.array([oov_df_value])\n df_data = np.insert(df_data, 0, oov_df_value)\n\n if self._pad_to_max:\n padding_size = self._max_tokens - len(df_data)\n df_data = np.pad(df_data, (0, padding_size), \"constant\")\n\n # As above, we're using the fact that df_data is a Keras weight to\n # simplify storing the value back into the TF variable.\n K.set_value(self._tf_idf_weights, df_data)\n\n if not append and self._has_vocab:\n self._clear_table()\n\n self._insert_table_data(vocab, values)\n\n def build(self, input_shape):\n # We have to use 'and not ==' here, because input_shape[1] !/== 1 can result\n # in None for undefined shape axes. 
If using 'and !=', this causes the\n # expression to evaluate to False instead of True if the shape is undefined;\n # the expression needs to evaluate to True in that case.\n if self._split is not None and not input_shape[1] == 1: # pylint: disable=g-comparison-negation\n raise RuntimeError(\n \"When using TextVectorization to tokenize strings, the first \"\n \"dimension of the input array must be 1, got shape \"\n \"{}\".format(input_shape))\n\n self._final_vocab_size = self._get_table_size()\n super(TextVectorization, self).build(input_shape)\n\n def _set_state_variables(self, updates):\n if not self.built:\n raise RuntimeError(\"_set_state_variables() must be called after build().\")\n if self._output_mode == TFIDF:\n self.set_vocabulary(updates[_VOCAB_NAME], updates[_IDF_NAME],\n updates[_OOV_IDF_NAME])\n else:\n self.set_vocabulary(updates[_VOCAB_NAME])\n\n def _preprocess(self, inputs):\n if self._standardize is LOWER_AND_STRIP_PUNCTUATION:\n lowercase_inputs = gen_string_ops.string_lower(inputs)\n inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,\n \"\")\n elif callable(self._standardize):\n inputs = self._standardize(inputs)\n elif self._standardize is not None:\n raise ValueError((\"%s is not a supported standardization. \"\n \"TextVectorization supports the following options \"\n \"for `standardize`: None, \"\n \"'lower_and_strip_punctuation', or a \"\n \"Callable.\") % self._standardize)\n\n if self._split is not None:\n # If we are splitting, we validate that the 1st axis is of dimension 1 and\n # so can be squeezed out. We do this here instead of after splitting for\n # performance reasons - it's more expensive to squeeze a ragged tensor.\n inputs = array_ops.squeeze(inputs, axis=1)\n if self._split is SPLIT_ON_WHITESPACE:\n # This treats multiple whitespaces as one whitespace, and strips leading\n # and trailing whitespace.\n inputs = ragged_string_ops.string_split_v2(inputs)\n elif callable(self._split):\n inputs = self._split(inputs)\n else:\n raise ValueError(\n (\"%s is not a supported splitting.\"\n \"TextVectorization supports the following options \"\n \"for `split`: None, 'whitespace', or a Callable.\") % self._split)\n\n # Note that 'inputs' here can be either ragged or dense depending on the\n # configuration choices for this Layer. The strings.ngrams op, however, does\n # support both ragged and dense inputs.\n if self._ngrams is not None:\n inputs = ragged_string_ops.ngrams(\n inputs, ngram_width=self._ngrams, separator=\" \")\n\n return inputs\n\n def call(self, inputs):\n inputs = self._preprocess(inputs)\n\n # If we're not doing any output processing, return right away.\n if self._output_mode is None:\n return inputs\n\n # The table lookup ops don't natively support ragged tensors, so if we have\n # a RT we need to use map_flat_values to look up every element.\n if ragged_tensor.is_ragged(inputs):\n indexed_data = ragged_functional_ops.map_flat_values(\n self._table.lookup, inputs)\n else:\n indexed_data = self._table.lookup(inputs)\n\n if self._output_mode == INT:\n # Once we have the dense tensor, we can return it if we weren't given a\n # fixed output sequence length. 
If we were, though, we have to dynamically\n # choose whether to pad or trim it based on each tensor.\n\n # We need to convert to dense if we have a ragged tensor.\n if ragged_tensor.is_ragged(indexed_data):\n dense_data = indexed_data.to_tensor(default_value=0)\n else:\n dense_data = indexed_data\n\n if self._output_sequence_length is None:\n dense_data.set_shape(tensor_shape.TensorShape((None, None)))\n return dense_data\n else:\n sequence_len = K.shape(dense_data)[1]\n pad_amt = self._output_sequence_length - sequence_len\n pad_fn = lambda: array_ops.pad(dense_data, [[0, 0], [0, pad_amt]])\n slice_fn = lambda: dense_data[:, :self._output_sequence_length]\n output_tensor = control_flow_ops.cond(\n sequence_len < self._output_sequence_length,\n true_fn=pad_fn,\n false_fn=slice_fn)\n output_tensor.set_shape(\n tensor_shape.TensorShape((None, self._output_sequence_length)))\n return output_tensor\n\n out_depth = self._max_tokens if self._pad_to_max else (\n self._final_vocab_size + self._reserved_values)\n\n if self._output_mode == BINARY:\n bool_one_hot_data = array_ops.one_hot(\n indexed_data, depth=out_depth, on_value=True, off_value=False)\n reduced_bool_data = math_ops.reduce_any(bool_one_hot_data, axis=1)\n binary_data = math_ops.cast(reduced_bool_data, dtypes.int64)\n binary_data.set_shape(tensor_shape.TensorShape((None, out_depth)))\n return binary_data\n\n one_hot_data = array_ops.one_hot(indexed_data, depth=out_depth)\n counts = math_ops.reduce_sum(one_hot_data, axis=1)\n if self._output_mode == COUNT:\n count_data = math_ops.cast(counts, dtypes.int64)\n count_data.set_shape(tensor_shape.TensorShape((None, out_depth)))\n return count_data\n\n tf_idf_data = math_ops.multiply(counts, self._tf_idf_weights)\n tf_idf_data.set_shape(tensor_shape.TensorShape((None, out_depth)))\n if self._output_mode == TFIDF:\n return tf_idf_data\n\n # We can only get here if we didn't recognize the passed mode.\n raise ValueError(\"Unknown output mode %s\" % self._output_mode)\n\n\ndef _validate_string_arg(input_data,\n allowable_strings,\n arg_name,\n allow_none=True,\n allow_callables=True):\n \"\"\"Validates the correctness of a string-based arg for VectorizeText.\"\"\"\n if allow_none and input_data is None:\n return\n elif allow_callables and callable(input_data):\n return\n elif isinstance(input_data,\n six.string_types) and input_data in allowable_strings:\n return\n else:\n allowed_args = \"`None`, \" if allow_none else \"\"\n allowed_args += \"a `Callable`, \" if allow_callables else \"\"\n allowed_args += \"or one of the following values: %s\" % allowable_strings\n raise ValueError(\n (\"VectorizeText's %s arg received an invalid value %s. \" +\n \"Allowed values are %s.\") % (arg_name, input_data, allowed_args))\n\n\nclass _TextVectorizationCombiner(Combiner):\n \"\"\"Combiner for the TextVectorization preprocessing layer.\n\n This class encapsulates the logic for computing a vocabulary based on the\n frequency of each token.\n\n Attributes:\n vocab_size: (Optional) If set, only the top `vocab_size` tokens (based on\n frequency across the dataset) are retained in the vocabulary. 
If None, or\n set to a value greater than the total number of distinct tokens in the\n dataset, all tokens are retained.\n \"\"\"\n\n def __init__(self, vocab_size=None, compute_idf=False):\n self._vocab_size = vocab_size\n self._compute_idf = compute_idf\n self._input_dtype = dtypes.string\n\n def compute(self, values, accumulator=None):\n \"\"\"Compute a step in this computation, returning a new accumulator.\"\"\"\n if dtypes.as_dtype(self._input_dtype) != dtypes.as_dtype(values.dtype):\n raise RuntimeError(\"Expected input type %s, got %s\" %\n (self._input_dtype, values.dtype))\n if ragged_tensor.is_ragged(values):\n values = values.to_list()\n if isinstance(values, ops.EagerTensor):\n values = values.numpy()\n if isinstance(values, np.ndarray):\n values = values.tolist()\n\n if accumulator is None:\n accumulator = self._create_accumulator()\n\n # TODO(momernick): Benchmark improvements to this algorithm.\n for document in values:\n current_doc_id = accumulator.metadata[0]\n for token in document:\n accumulator.count_dict[token] += 1\n if self._compute_idf:\n doc_count = accumulator.per_doc_count_dict[token]\n if doc_count[\"last_doc_id\"] != current_doc_id:\n doc_count[\"count\"] += 1\n doc_count[\"last_doc_id\"] = current_doc_id\n accumulator.metadata[0] += 1\n\n return accumulator\n\n def merge(self, accumulators):\n \"\"\"Merge several accumulators to a single accumulator.\"\"\"\n if not accumulators:\n return accumulators\n\n base_accumulator = accumulators[0]\n\n for accumulator in accumulators[1:]:\n base_accumulator.metadata[0] += accumulator.metadata[0]\n for token, value in accumulator.count_dict.items():\n base_accumulator.count_dict[token] += value\n if self._compute_idf:\n for token, value in accumulator.per_doc_count_dict.items():\n # Any newly created token counts in 'base_accumulator''s\n # per_doc_count_dict will have a last_doc_id of -1. 
This is always\n # less than the next doc id (which are strictly positive), so any\n # future occurences are guaranteed to be counted.\n base_accumulator.per_doc_count_dict[token][\"count\"] += value[\"count\"]\n\n return base_accumulator\n\n def _inverse_document_frequency(self, document_counts, num_documents):\n \"\"\"Compute the inverse-document-frequency (IDF) component of TFIDF.\n\n Uses the default weighting scheme described in\n https://en.wikipedia.org/wiki/Tf%E2%80%93idf.\n\n Args:\n document_counts: An array of the # of documents each token appears in.\n num_documents: An int representing the total number of documents\n\n Returns:\n An array of \"inverse document frequency\" weights.\n \"\"\"\n return np.log(1 + num_documents / (1 + np.array(document_counts)))\n\n def extract(self, accumulator):\n \"\"\"Convert an accumulator into a dict of output values.\n\n Args:\n accumulator: An accumulator aggregating over the full dataset.\n\n Returns:\n A dict of:\n \"vocab\": A list of the retained items in the vocabulary.\n \"idf\": The inverse-document-frequency for each item in vocab.\n idf[vocab_idx] is the IDF value for the corresponding vocab item.\n \"oov_idf\": The inverse-document-frequency for the OOV token.\n \"\"\"\n if self._compute_idf:\n vocab_counts, document_counts, num_documents = accumulator\n else:\n vocab_counts, _, _ = accumulator\n\n sorted_counts = sorted(\n vocab_counts.items(), key=operator.itemgetter(1, 0), reverse=True)\n vocab_data = (\n sorted_counts[:self._vocab_size] if self._vocab_size else sorted_counts)\n vocab = [data[0] for data in vocab_data]\n\n if self._compute_idf:\n doc_counts = [document_counts[token][\"count\"] for token in vocab]\n idf = self._inverse_document_frequency(doc_counts, num_documents[0])\n oov_idf = np.array([np.log(1 + num_documents[0])])\n return {_VOCAB_NAME: vocab, _IDF_NAME: idf, _OOV_IDF_NAME: oov_idf}\n else:\n return {_VOCAB_NAME: vocab}\n\n def restore(self, output):\n \"\"\"Create an accumulator based on 'output'.\"\"\"\n raise NotImplementedError(\n \"TextVectorization does not restore or support streaming updates.\")\n\n def serialize(self, accumulator):\n \"\"\"Serialize an accumulator for a remote call.\"\"\"\n output_dict = {}\n output_dict[\"metadata\"] = accumulator.metadata\n output_dict[\"vocab\"] = list(accumulator.count_dict.keys())\n output_dict[\"vocab_counts\"] = list(accumulator.count_dict.values())\n if self._compute_idf:\n output_dict[\"idf_vocab\"] = list(accumulator.per_doc_count_dict.keys())\n output_dict[\"idf_counts\"] = [\n counter[\"count\"]\n for counter in accumulator.per_doc_count_dict.values()\n ]\n return compat.as_bytes(json.dumps(output_dict))\n\n def deserialize(self, encoded_accumulator):\n \"\"\"Deserialize an accumulator received from 'serialize()'.\"\"\"\n accumulator_dict = json.loads(compat.as_text(encoded_accumulator))\n\n accumulator = self._create_accumulator()\n accumulator.metadata[0] = accumulator_dict[\"metadata\"][0]\n\n count_dict = dict(\n zip(accumulator_dict[\"vocab\"], accumulator_dict[\"vocab_counts\"]))\n accumulator.count_dict.update(count_dict)\n\n if self._compute_idf:\n create_dict = lambda x: {\"count\": x, \"last_doc_id\": -1}\n idf_count_dicts = [\n create_dict(count) for count in accumulator_dict[\"idf_counts\"]\n ]\n idf_dict = dict(zip(accumulator_dict[\"idf_vocab\"], idf_count_dicts))\n accumulator.per_doc_count_dict.update(idf_dict)\n\n return accumulator\n\n def _create_accumulator(self):\n \"\"\"Accumulate a sorted array of vocab tokens and corresponding 
counts.\"\"\"\n accumulator = collections.namedtuple(\n \"Accumulator\", [\"count_dict\", \"per_doc_count_dict\", \"metadata\"])\n\n count_dict = collections.defaultdict(int)\n if self._compute_idf:\n create_default_dict = lambda: {\"count\": 0, \"last_doc_id\": -1}\n per_doc_count_dict = collections.defaultdict(create_default_dict)\n else:\n per_doc_count_dict = None\n metadata = [0]\n return accumulator(count_dict, per_doc_count_dict, metadata)\n" ]
[ [ "tensorflow.python.ops.ragged.ragged_string_ops.string_split_v2", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.gen_string_ops.string_lower", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.util.compat.as_text", "tensorflow.python.ops.ragged.ragged_functional_ops.map_flat_values", "numpy.log", "tensorflow.python.ops.string_ops.regex_replace", "numpy.append", "tensorflow.python.ops.array_ops.pad", "numpy.expand_dims", "numpy.array", "numpy.pad", "tensorflow.python.ops.math_ops.reduce_any", "tensorflow.python.keras.backend.shape", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.ragged.ragged_string_ops.ngrams", "tensorflow.python.data.ops.dataset_ops.get_legacy_output_shapes", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.ops.lookup_ops.MutableHashTable", "tensorflow.python.keras.backend.set_value", "numpy.insert", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.keras.backend.floatx", "tensorflow.python.ops.array_ops.one_hot", "tensorflow.python.keras.backend.get_value", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.ops.ragged.ragged_tensor.is_ragged" ] ]
tanimutomo/advex-uar
[ "251e0978dba7a5473c40e55fb350aa2144135120" ]
[ "advex_uar/examples/eval.py" ]
[ "import click\nimport importlib\nimport os\n\nimport numpy as np\nimport torch\n\nfrom advex_uar.eval import ImagenetEvaluator, ImagenetCEvaluator\nfrom advex_uar.eval import CIFAR10Evaluator, CIFAR10CEvaluator\nfrom advex_uar.common.pyt_common import *\nfrom advex_uar.common import FlagHolder\n\ndef get_ckpt(FLAGS):\n if FLAGS.ckpt_path is not None:\n print('Loading ckpt from {}'.format(FLAGS.ckpt_path))\n return torch.load(FLAGS.ckpt_path)\n elif FLAGS.use_wandb and FLAGS.wandb_run_id is not None:\n globals()['wandb'] = importlib.import_module('wandb')\n print('Loading ckpt from wandb run id {}'.format(FLAGS.wandb_run_id))\n api = wandb.Api()\n run = api.run(\"{}/{}/{}\".format(\n FLAGS.wandb_username, FLAGS.wandb_ckpt_project, FLAGS.wandb_run_id))\n ckpt_file = run.file(\"ckpt.pth\")\n ckpt_file.download(replace=False)\n os.rename('ckpt.pth', os.path.join(wandb.run.dir, 'ckpt.pth'))\n return torch.load(os.path.join(wandb.run.dir, 'ckpt.pth'))\n else:\n raise ValueError('You must specify a wandb_run_id or a ckpt_path.')\n \ndef run(**flag_kwargs):\n FLAGS = FlagHolder()\n FLAGS.initialize(**flag_kwargs)\n if FLAGS.wandb_ckpt_project is None:\n FLAGS._dict['wandb_ckpt_project'] = FLAGS.wandb_project\n if FLAGS.step_size is None:\n FLAGS.step_size = get_step_size(FLAGS.epsilon, FLAGS.n_iters, FLAGS.use_max_step)\n FLAGS._dict['step_size'] = FLAGS.step_size\n FLAGS.summary()\n\n logger = init_logger(FLAGS.use_wandb, 'eval', FLAGS._dict)\n\n if FLAGS.dataset in ['cifar-10', 'cifar-10-c']:\n nb_classes = 10\n else:\n nb_classes = 1000 // FLAGS.class_downsample_factor\n\n model_dataset = FLAGS.dataset\n if model_dataset == 'imagenet-c':\n model_dataset = 'imagenet'\n print(FLAGS.resnet_size)\n model = get_model(model_dataset, FLAGS.resnet_size, nb_classes)\n ckpt = get_ckpt(FLAGS)\n model.load_state_dict(ckpt['model'])\n\n attack = get_attack(FLAGS.dataset, FLAGS.attack, FLAGS.epsilon,\n FLAGS.n_iters, FLAGS.step_size, False)\n\n if FLAGS.dataset == 'imagenet':\n Evaluator = ImagenetEvaluator\n elif FLAGS.dataset == 'imagenet-c':\n Evaluator = ImagenetCEvaluator\n elif FLAGS.dataset == 'cifar-10':\n Evaluator = CIFAR10Evaluator\n elif FLAGS.dataset == 'cifar-10-c':\n Evaluator = CIFAR10CEvaluator\n \n evaluator = Evaluator(model=model, attack=attack, dataset=FLAGS.dataset,\n dataset_path=FLAGS.dataset_path, nb_classes=nb_classes,\n corruption_type=FLAGS.corruption_type, corruption_name=FLAGS.corruption_name,\n corruption_level=FLAGS.corruption_level,\n batch_size=FLAGS.batch_size, stride=FLAGS.class_downsample_factor,\n fp_all_reduce=FLAGS.use_fp16, logger=logger, tag=FLAGS.tag)\n evaluator.evaluate()\n\[email protected]()\n# wandb options\[email protected](\"--use_wandb/--no_wandb\", is_flag=True, default=True)\[email protected](\"--wandb_project\", default=None, help=\"WandB project to log to\")\[email protected](\"--tag\", default='eval', help=\"Short tag for WandB\")\n\n# Dataset options\n# Allowed values: ['imagenet', 'imagenet-c', 'cifar-10', 'cifar-10-c']\[email protected](\"--dataset\", default='imagenet')\[email protected](\"--dataset_path\", default=None)\n\n# Model options\[email protected](\"--resnet_size\")\[email protected](\"--class_downsample_factor\", default=1, type=int)\n\n# checkpoint options; if --ckpt_path is None, assumes that ckpt is pulled from WandB\[email protected](\"--ckpt_path\", default=None, help=\"Path to the checkpoint for evaluation\")\[email protected](\"--wandb_username\", default=None, help=\"WandB username to pull ckpt from\")\[email 
protected](\"--wandb_ckpt_project\", default=None, help='WandB project to pull ckpt from')\[email protected](\"--wandb_run_id\", default=None,\n help='If --use_wandb is set, WandB run_id to pull ckpt from. Otherwise'\\\n 'a run_id which will be associated with --ckpt_path')\n\n# Evaluation options\[email protected](\"--use_fp16/--no_fp16\", is_flag=True, default=False)\[email protected](\"--batch_size\", default=128)\n\n# Options for ImageNet-C and CIFAR-10-C\[email protected](\"--corruption_type\", default=None)\[email protected](\"--corruption_name\", default=None)\[email protected](\"--corruption_level\", default=None)\n\n# Attack options\n# Allowed values: ['pgd_linf', 'pgd_l2', 'fw_l1', 'jpeg_linf', 'jpeg_l2', 'jpeg_l1', 'elastic', 'fog', 'gabor', 'snow']\[email protected](\"--attack\", default=None, type=str)\[email protected](\"--epsilon\", default=16.0, type=float)\[email protected](\"--step_size\", default=None, type=float)\[email protected](\"--use_max_step\", is_flag=True, default=False)\[email protected](\"--n_iters\", default=50, type=int)\n\ndef main(**flags):\n run(**flags)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.load" ] ]
headupinclouds/pico
[ "45564fca95cbc8f598c78e3016ca21cffbe3d853" ]
[ "gen/sample/genki.py" ]
[ "#\n#\n#\n\n#\nimport sys\nimport random\nimport numpy\nfrom scipy import misc\nfrom PIL import Image\nfrom PIL import ImageOps\nimport struct\nimport argparse\nimport os\n\n#\nparser = argparse.ArgumentParser()\nparser.add_argument('src', help='GENKI source folder')\nargs = parser.parse_args()\n\n#\nsrc = args.src\n\n#\nplot = 0\n\nif plot:\n\timport matplotlib.pyplot\n\timport matplotlib.image\n\timport matplotlib.cm\n\n#\ndef write_rid(im):\n\t#\n\t# raw intensity data\n\t#\n\n\t#\n\th = im.shape[0]\n\tw = im.shape[1]\n\n\t#\n\thw = struct.pack('ii', h, w)\n\n\ttmp = [None]*w*h\n\tfor y in range(0, h):\n\t\tfor x in range(0, w):\n\t\t\ttmp[y*w + x] = im[y, x]\n\n\t#\n\tpixels = struct.pack('%sB' % w*h, *tmp)\n\n\t#\n\tsys.stdout.buffer.write(hw)\n\tsys.stdout.buffer.write(pixels)\n\n#\ndef export(im, r, c, s):\n\t#\n\tnrows = im.shape[0]\n\tncols = im.shape[1]\n\n\t# crop\n\tr0 = max(int(r - 0.75*s), 0); r1 = min(r + 0.75*s, nrows)\n\tc0 = max(int(c - 0.75*s), 0); c1 = min(c + 0.75*s, ncols)\n\n\tim = im[r0:r1, c0:c1]\n\n\tnrows = im.shape[0]\n\tncols = im.shape[1]\n\n\tr = r - r0\n\tc = c - c0\n\n\t# resize, if needed\n\tmaxwsize = 192.0\n\twsize = max(nrows, ncols)\n\n\tratio = maxwsize/wsize\n\n\tif ratio<1.0:\n\t\tim = numpy.asarray( Image.fromarray(im).resize((int(ratio*ncols), int(ratio*nrows))) )\n\n\t\tr = ratio*r\n\t\tc = ratio*c\n\t\ts = ratio*s\n\n\t#\n\tnrands = 7;\n\n\tlst = []\n\n\tfor i in range(0, nrands):\n\t\t#\n\t\tstmp = s*random.uniform(0.9, 1.1)\n\n\t\trtmp = r + s*random.uniform(-0.05, 0.05)\n\t\tctmp = c + s*random.uniform(-0.05, 0.05)\n\n\t\t#\n\t\tif plot:\n\t\t\tmatplotlib.pyplot.cla()\n\n\t\t\tmatplotlib.pyplot.plot([ctmp-stmp/2, ctmp+stmp/2], [rtmp-stmp/2, rtmp-stmp/2], 'b', linewidth=3)\n\t\t\tmatplotlib.pyplot.plot([ctmp+stmp/2, ctmp+stmp/2], [rtmp-stmp/2, rtmp+stmp/2], 'b', linewidth=3)\n\t\t\tmatplotlib.pyplot.plot([ctmp+stmp/2, ctmp-stmp/2], [rtmp+stmp/2, rtmp+stmp/2], 'b', linewidth=3)\n\t\t\tmatplotlib.pyplot.plot([ctmp-stmp/2, ctmp-stmp/2], [rtmp+stmp/2, rtmp-stmp/2], 'b', linewidth=3)\n\n\t\t\tmatplotlib.pyplot.imshow(im, cmap=matplotlib.cm.Greys_r)\n\n\t\t\tmatplotlib.pyplot.show()\n\n\t\tlst.append( (int(rtmp), int(ctmp), int(stmp)) )\n\n\t#\n\twrite_rid(im)\n\n\tsys.stdout.buffer.write( struct.pack('i', nrands) )\n\n\tfor i in range(0, nrands):\n\t\tsys.stdout.buffer.write( struct.pack('iii', lst[i][0], lst[i][1], lst[i][2]) )\n\ndef mirror_and_export(im, r, c, s):\n\t#\n\t# exploit mirror symmetry of the face\n\t#\n\n\t# flip image\n\tim = numpy.asarray(ImageOps.mirror(Image.fromarray(im)))\n\n\t# flip column coordinate of the object\n\tc = im.shape[1] - c\n\n\t# export\n\texport(im, r, c, s)\n\n# image list\nimlist = open(src + '/Subsets/GENKI-SZSL/GENKI-SZSL_Images.txt', 'r').readlines()\n\n# object sample is specified by three coordinates (row, column and size; all in pixels)\nrs = [float(line.split()[1]) for line in open(src+'/Subsets/GENKI-SZSL/GENKI-SZSL_labels.txt', 'r').readlines()]\ncs = [float(line.split()[0]) for line in open(src+'/Subsets/GENKI-SZSL/GENKI-SZSL_labels.txt', 'r').readlines()]\nss = [float(line.split()[2]) for line in open(src+'/Subsets/GENKI-SZSL/GENKI-SZSL_labels.txt', 'r').readlines()]\n\n#\nn = 0\n\nfor i in range(0, len(rs)):\n\t# construct full image path\n\tpath = src + '/files/' + imlist[i].strip()\n\n\tr = rs[i]\n\tc = cs[i]\n\ts = ss[i]\n\n\t#\n\ttry:\n\t\tim = Image.open(path).convert('L')\n\texcept:\n\t\tcontinue\n\n\t#\n\tim = numpy.asarray(im)\n\n\t#\n\texport(im, r, c, s)\n\n\t# faces are symmetric and 
we exploit this here\n\tmirror_and_export(im, r, c, s)\n\n" ]
[ [ "numpy.asarray" ] ]
YantingWan/cloudmesh-analytics
[ "00fc978e8bb720f321caa080995085f826fff5ce" ]
[ "cloudmesh/analytics/server/cloudmesh/analytics.py" ]
[ "import os\nfrom flask import jsonify, current_app\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom werkzeug.utils import secure_filename\nfrom cloudmesh.file_helpers import *\n\ndef linear_regression(file_name, body):\n \"\"\"\n Linear regression operation on two dimension data. The input format should be a list of pairs, e.g. [[1, 2], [3, 4]...]\n :param file_name: the data source\n :param body: the request body\n :return:\n \"\"\"\n # Extract parameters from the request body\n paras = body['paras']\n #TODO: Data format is not correct\n try:\n data = read_csv(file_name)\n X = data[:,0]\n Y = data[:,1]\n reg = LinearRegression(**paras).fit(X,Y)\n return jsonify({'coefficient':reg.coef_ })\n except Exception as e:\n return jsonify({'error_message': str(e)})\n\ndef pca():\n return jsonify({\"output\": 'run_pca_success'})\n\n" ]
[ [ "sklearn.linear_model.LinearRegression" ] ]
vfdev-5/fairscale
[ "b75a5e266d0d7953186a59feff8d808af4e0bf82" ]
[ "fairscale/optim/oss.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import OrderedDict\nimport copy\nfrom itertools import chain\nimport logging\nfrom math import inf\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union\n\nimport torch\nimport torch.distributed as dist\nfrom torch.nn import Parameter\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, calc_grad_norm, recursive_copy_to_device\n\n__all__ = [\"OSS\"]\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n We use a greedy algorithm to pack a number of parameters\n at each rank. Each parameter belongs to a single rank and\n is not divided among rank.\n\n After each rank completed their parameter update, they broadcast\n the new version of the parameters to all other ranks to synchronize\n the parameters for next round forward/backward computation.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n broadcast_buffer_size (int):\n (deprecated) used to cap the size of the broadcast buffers, not being used anymore.\n\n\n .. warning: the communication patterns that OSS use depend on the \"trainability\" graph,\n meaning that all the parameters which `require_grad` are handled differently. This is\n not reevaluated at every step, please use `refresh_trainable()` if your model changed\n (freeze or unfreeze for instance).\n If used with :class:<fairscale.nn.ShardedDDP> then an automatic change detection is possible,\n via the `auto_refresh_trainable` parameter.\n \"\"\"\n\n #: The optimizer used for a given shard\n optim: Optimizer\n\n in_super_constructor: bool\n\n def __init__(\n self,\n params: _params_t,\n optim: Type[Optimizer] = SGD,\n group: Optional[Any] = None,\n broadcast_buffer_size: int = -1,\n **default: Any,\n ):\n\n # Hold all the model params in the root .param_groups\n self.in_super_constructor = True\n super().__init__(params, default)\n self.in_super_constructor = False\n\n # Partition information. 
lazy evaluation, computed when requested\n self._per_device_params: Dict[torch.device, List[List[Parameter]]] = OrderedDict() # device, rank, params\n self._param_rank: Dict[torch.Tensor, int] = {}\n self._partition_parameters: List[List[dict]] = []\n self._index_to_param: Dict[int, torch.Tensor] = {}\n self._param_to_index: Dict[int, int] = {}\n self._local_params: Optional[List[torch.Tensor]] = None\n\n # Default empty values + immutables\n self._optim_defaults = default\n self._optim_constructor = optim\n\n self.group = group if group is not None else dist.group.WORLD\n self.world_size = dist.get_world_size(self.group)\n self.rank = dist.get_rank(self.group)\n self.global_rank = self.get_global_rank(self.group, self.rank)\n self.buckets: Dict[torch.device, List[torch.Tensor]] = {}\n\n self._all_states: List[Dict[str, Any]] = [] # Optional consolidated optimizer state\n self._default_device = torch.device(\"cpu\")\n\n # Setup everything which is related to the parameters to be trained\n # (partition and optimizer for the shard)\n self.refresh_trainable()\n\n # Partition helpers\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed data parallel ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. We need all the ranks for the broadcast\n inside step().\n \"\"\"\n if len(self._partition_parameters) == 0:\n self._partition_parameters = [list() for _ in range(self.world_size)]\n sizes = [0] * self.world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(self.world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n\n # We're partitioning the optimizer state,\n # so trainable parameters are the ones which really count\n if param.requires_grad:\n sizes[rank] += param.numel()\n else:\n # Spread frozen params on a per-tensor basis\n # Mostly useful for balance partitions for fine tuning for instance\n # Not required strictly speaking\n sizes[rank] += 1\n\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n self._partition_parameters[rank].append(param_group_rank)\n\n assert min(sum(len(pg[\"params\"]) for pg in partition) for partition in self._partition_parameters) > 0, (\n \"One or more empty shards detected, the world size is too big or the model too small.\\n\"\n + \"Please reduce your world size if this is the model you would like to train\\n\"\n + f\"Current world size: {self.world_size}\\n\"\n + \"Current number of parameters: {}\".format(sum(len(pg[\"params\"]) for pg in self.param_groups))\n )\n\n return self._partition_parameters\n\n @property\n def local_params(self) -> List[torch.Tensor]:\n \"\"\" Iterable which goes through the parameters that this rank owns\n \"\"\"\n if self._local_params is None:\n self._local_params = list(\n chain(\n *[\n list(filter(lambda x: x.grad is not None, device_params[self.rank]))\n for device_params in self.per_device_params.values()\n ]\n )\n )\n\n # Make sure that the iterator is not consumed, only expose a copy\n return self._local_params\n\n @property\n def index_to_param(self) -> Dict[int, torch.Tensor]:\n \"\"\" Hash table in between parameter indices in the global optimizer scheme, and the actual params\n \"\"\"\n if 
len(self._index_to_param) == 0:\n self._index_to_param = {i: p for i, p in enumerate(chain(*(g[\"params\"] for g in self.param_groups)))}\n\n return self._index_to_param\n\n @property\n def param_to_index(self) -> Dict[int, int]:\n \"\"\" Hash table in between parameter indices in the global optimizer scheme, and the actual params\n \"\"\"\n if len(self._param_to_index) == 0:\n self._param_to_index = {id(p): i for i, p in enumerate(chain(*(g[\"params\"] for g in self.param_groups)))}\n\n return self._param_to_index\n\n @property\n def per_device_params(self) -> Dict[torch.device, List[List[Parameter]]]:\n \"\"\"Sorted list of all the params, first per device then per rank.\n\n Within a list params are sorted per number of elements to allow for an easy bucketing.\n \"\"\"\n if len(self._per_device_params) == 0:\n # Go through all params, log them per device\n # The ordering is important here, needs to be the same on all ranks\n # So that ulterior broadcast calls are matching\n for param_group in self.param_groups:\n for param in param_group[\"params\"]:\n device = param.device\n if self._per_device_params.get(device) is None:\n self._per_device_params[device] = [[] for _ in range(self.world_size)]\n self._per_device_params[device][self.param_to_rank[param]] += [param]\n\n # Sort param_lists by size\n for device in self._per_device_params.keys():\n for rank_params in self._per_device_params[device]:\n rank_params.sort(key=lambda x: x.numel())\n\n return self._per_device_params\n\n @property\n def param_to_rank(self) -> Dict[torch.Tensor, int]:\n \"\"\"param to data parallel rank\"\"\"\n if len(self._param_rank) == 0:\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n self._param_rank[param] = rank\n\n logging.debug(\"ZeRO: Parameters dispatched to ranks %s \" % list(self._param_rank.values()))\n\n return self._param_rank\n\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n \"\"\"Performs a single optimization step (parameter update).\n\n Arguments:\n closure (callable): A closure that reevaluates the model and\n returns the loss. Optional for most optimizers.\n\n .. note: Any extra parameter is passed to the base optimizer as-is\"\"\"\n\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n OSS._sync_param_groups(self.param_groups, self.optim.param_groups)\n\n # Run the optimizer step on this shard only:\n if closure is not None:\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n else:\n loss = self.optim.step(**kwargs)\n\n # Sync all the updated shards in between the ranks\n self._broadcast_params()\n\n # Sync hypothethical new results from the wrapped optimizer to the exposed param_groups\n OSS._sync_param_groups(self.optim.param_groups, self.param_groups)\n\n return loss\n\n def clip_grad_norm(\n self,\n max_norm: Union[float, int],\n norm_type: Union[float, int] = 2.0,\n filter_params_fn: Callable[[Any], Any] = None,\n ) -> torch.Tensor:\n \"\"\"\n Clip all gradients at this point in time. The norm is computed over all gradients together, as if they were\n concatenated into a single vector. 
Gradients are modified in-place.\n\n Arguments:\n max_norm (float or int): max norm of the gradients\n norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm.\n\n Returns:\n Total norm of the parameters (viewed as a single vector).\n\n .. note: This is analogous to `torch.nn.utils.clip_grad_norm_` but handles the partitioning and multiple devices per rank\n under the hood. The default torch util is not applicable here, because each rank only has a partial view of all the grads\n in the model, so calling it in the OSS context would lead to different scaling being applied per subset of model parameters\n\n .. warning: This needs to be called on all ranks, since synchronization primitives will be used\n\n \"\"\"\n\n # Compute the max norm for this shards's worth of gradients\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n\n # Option to filter parameters from the grad_norm calculation. This is useful for model parallelism.\n # To avoid double counting, only consider parameters on rank zero + anything marked 'model_parallel'\n # 'model_parallel' flag is set in Megatron-LM:\n # https://github.com/NVIDIA/Megatron-LM/blob/19301985dd31c8b612095cbad15bd903e8ddd497/megatron/mpu/layers.py#L54\n local_params = filter_params_fn(self.local_params) if filter_params_fn is not None else self.local_params\n\n local_norm = calc_grad_norm(local_params, norm_type).to(self._default_device)\n # Compute the norm on this grad set,\n # then sync all the norms from all ranks\n if norm_type == inf:\n total_norm = local_norm\n # all reduce over data parallel and model parallel workers\n dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=dist.group.WORLD)\n else:\n # local norm result can be accumulated with the remote ones if put to the right power\n # n_i = sum_rank(a^p)^1/p\n # -> n_total = all_reduce(n_i^p)^(1/p) = sum_i(n_i^p)^1/p = sum_i(sum_rank(a^p))^1/p\n # all reduce over data parallel and model parallel workers\n total_norm = local_norm ** norm_type\n dist.all_reduce(total_norm)\n total_norm = total_norm ** (1.0 / norm_type)\n\n clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)\n if clip_coef < 1:\n for device, device_params in self.per_device_params.items():\n for p in filter(lambda x: x.grad is not None, device_params[self.rank]):\n p.grad.detach().mul_(clip_coef.to(device)) # type: ignore\n\n return total_norm\n\n # State dict interfaces\n def consolidate_state_dict(self, recipient_rank: int = 0) -> None:\n \"\"\"Update the consolidated state_dict list, one per rank.\n\n .. warning: This needs to be called on all replicas\"\"\"\n\n # Sync lr and other attributes in case its been updated\n OSS._sync_param_groups(self.param_groups, self.optim.param_groups)\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.debug(\"Pulling the sharded optimizer state from all replicas\")\n self._all_states = self._collect_sharded_states()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n def local_state_dict(self) -> dict:\n \"\"\" .. deprecated:: 0.1.5\n\n Returns this rank's state_dict as a :class:`dict` which contains two entries:\n\n * state - a dict holding current optimization state. Its content\n differs between optimizer classes.\n\n * param_groups - a dict containing all parameter groups\n\n .. 
warning: This does not represent the optimizer state dict, only a shard.\n \"\"\"\n return self.optim.state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n \"\"\"Return the last known global optimizer state. The returned state is compatible with Pytorch, in that the\n sharded properties are not exposed. It contains two entries:\n\n * state - a dict holding current optimization state. Its content\n differs between optimizer classes.\n\n * param_groups - a dict containing all parameter groups\n\n .. warning:\n If the state has not been consolidated, this returns a shard's worth, not the global state.\n\n .. warning:\n Returning the global state is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n\n if len(self._all_states) == 0:\n raise RuntimeError(\n \"Optimizer state has not been consolidated on this rank. \\\n Please call `consolidate_state_dict()` on all ranks beforehand if you meant to save the global state\"\n )\n\n # Unify the shard states and the state that pytorch would expect, given the model.\n # Indexation needs several redirections, since each shard only knows a limited scope of the model\n # - get the pytorch compliant parameter indexing\n state_dict = super().state_dict()\n\n # - go through the per-shard states, which are all indexed locally\n for rank, s in enumerate(self._all_states):\n # -- match the local indexing and the global partition, update the corresponding saved state globally\n for local_pg, global_pg in zip(s[\"param_groups\"], self.partition_parameters()[rank]):\n local_index_to_param_id = {\n i_param: id(global_pg[\"params\"][i]) for i, i_param in enumerate(local_pg[\"params\"])\n }\n\n for local_param_index in local_pg[\"params\"]:\n # Update the state, if any\n if local_param_index in s[\"state\"].keys():\n global_id = self.param_to_index[local_index_to_param_id[local_param_index]]\n state_dict[\"state\"][global_id] = s[\"state\"][local_param_index]\n\n # Make sure that the parameters are sorted in the state, as expected\n state_dict[\"state\"] = dict(sorted(state_dict[\"state\"].items()))\n return state_dict\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\"Restore the global parameter groups as well as the shard.\n\n Arguments:\n state_dict (dict): optimizer state. 
Should be an object returned\n from a call to :meth:`state_dict`\n \"\"\"\n\n # NOTE: PyTorch 1.5 does not index linearly but with the id(params) at saving time\n # we work around that here by using the fact that the params are ordered as in the param_groups\n pytorch15_index_redirect = {k: i for i, k in enumerate(state_dict[\"state\"].keys())}\n\n for key, value in state_dict[\"state\"].items():\n param = self.index_to_param[pytorch15_index_redirect[key]]\n\n # Populate the sharded optimizer state on the fly\n if self.param_to_rank[param] != self.rank:\n state_dict[\"state\"][key] = None\n else:\n self.optim.state[param] = recursive_copy_to_device(value, non_blocking=True, device=param.device)\n\n super().load_state_dict(state_dict)\n\n # Sync with the optimizer param groups\n OSS._sync_param_groups(state_dict[\"param_groups\"], self.param_groups)\n OSS._sync_param_groups(self.param_groups, self.optim.param_groups)\n\n def refresh_trainable(self) -> None:\n \"\"\" Updates the partitioning and communication patterns if the trainability (`requires_grad`)\n of some parameters changed.\n \"\"\"\n\n # Create the optim which will work on the param shard\n if not hasattr(self, \"optim\"):\n self._clear_cache()\n self._default_device = list(self.per_device_params.keys())[0]\n self.optim = self._optim_constructor(self.partition_parameters()[self.rank], **self._optim_defaults)\n OSS._sync_param_groups(self.optim.param_groups, self.param_groups)\n\n self._setup_flat_buffers()\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"Broadcast this rank's state shard, discard others\"\"\"\n\n # Tensor cannot be really empty, even if its size is meaningless\n dummy_sync_tensor = torch.tensor([1], device=self._default_device)\n\n for rank in range(self.world_size):\n if rank == self.rank:\n # Send the state to the reference replica\n logging.debug(\n \"Sending the sharded optimizer state to the reference replica from rank %s\", rank,\n )\n # legacy compatibility for old torch versions\n broadcast_object(\n self.local_state_dict(),\n src_rank=self.global_rank,\n group=self.group,\n dist_device=self._default_device,\n )\n else:\n global_rank = self.get_global_rank(self.group, rank)\n\n # Discard this tensor/rank, broadcast necessary for syncing and because NCCL does not support gather\n broadcast_object(\n torch.tensor([dummy_sync_tensor], dtype=torch.uint8, device=self._default_device),\n src_rank=global_rank,\n group=self.group,\n dist_device=self._default_device,\n )\n\n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"Collect all the state shards, in CPU memory.\"\"\"\n all_states = []\n\n for rank in range(self.world_size):\n if rank == self.rank:\n logging.debug(\"Saving self state\")\n all_states.append(\n recursive_copy_to_device(self.optim.state_dict(), non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n # Sync with other replicas\n broadcast_object(\n torch.tensor([0], dtype=torch.uint8, device=self._default_device),\n src_rank=self.global_rank,\n group=self.group,\n dist_device=self._default_device,\n )\n else:\n # Fetch the optim state from the other replicas\n global_rank = self.get_global_rank(self.group, rank)\n replica_state = broadcast_object(\n torch.tensor([0], dtype=torch.uint8, device=self._default_device),\n src_rank=global_rank,\n group=self.group,\n dist_device=self._default_device,\n )\n\n all_states.append(\n recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n logging.debug(\"State from rank %s received\", rank)\n\n 
return all_states\n\n def add_param_group(self, param_group: dict) -> None:\n \"\"\"Add a param group to the :class:`Optimizer` s `param_groups`.\n\n This can be useful when fine tuning a pre-trained network as frozen layers can be made\n trainable and added to the :class:`Optimizer` as training progresses.\n\n Arguments:\n param_group (dict): Specifies what Tensors should be optimized along with group\n specific optimization options\n\n .. warning: This handles updating the shards on all partitions, but needs to be called on all ranks.\n \"\"\"\n\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n # Force a re-partitioning\n self._clear_cache()\n\n # Update the partition\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n # Update the bucketing strategy accordingly\n self._setup_flat_buffers()\n\n def _clear_cache(self) -> None:\n self._partition_parameters.clear()\n self._per_device_params.clear()\n self._param_rank.clear()\n self._index_to_param.clear()\n self._param_to_index.clear()\n self._local_params = None\n\n @staticmethod\n def get_global_rank(group: Any, rank: int) -> int:\n if group is dist.group.WORLD:\n return rank\n else:\n global_rank = dist.distributed_c10d._get_global_rank(group, rank)\n return global_rank\n\n @staticmethod\n def _sync_param_groups(source: List[Dict[Any, Any]], destination: List[Dict[Any, Any]]) -> None:\n \"\"\"Sync learning rate and other optimizer attributes (needed to support schedulers).\"\"\"\n\n for source_group, destination_group in zip(source, destination):\n # Sync everything but the parameters\n for k in filter(lambda x: x != \"params\", source_group.keys()):\n destination_group[k] = source_group[k]\n\n @torch.no_grad()\n def _broadcast_params(self) -> None:\n \"\"\"Helper function to broadcast all the parameters from a given device\"\"\"\n\n last_work_handle = None # Work handles are consumed within this scope, no callback\n\n for device in self.buckets.keys():\n for src_rank, bucket in enumerate(self.buckets[device]):\n global_src_rank = self.get_global_rank(self.group, src_rank)\n last_work_handle = dist.broadcast(tensor=bucket, src=global_src_rank, group=self.group, async_op=True)\n\n # Only check on the last handle, they're all inlined on the same CUDA stream\n if last_work_handle:\n last_work_handle.wait()\n\n def _setup_flat_buffers(self) -> None:\n \"\"\"Make all params which are on the same device and tied to the same rank views of a single buffer.\n This is used at construction time, and anytime parameter trainability is changed (frozen or unfrozen) and\n `refresh_trainability` is called.\n \"\"\"\n\n for device, per_rank_params in self.per_device_params.items():\n # Only wipe the existing buckets if there are none\n # (could be that this is called twice, when trainability changes)\n if device not in self.buckets.keys():\n self.buckets[device] = []\n\n # Make parameters a view of the bucket\n for dst_rank, params in enumerate(per_rank_params):\n if len(params) > 0:\n\n # Clone the non-trainable params, if in a bucket it will get destroyed\n for param in filter(lambda x: not x.requires_grad, params):\n param.data = param.data.detach().clone()\n\n # Merge all the trainable params in a single bucket\n trainable_params = list(filter(lambda x: x.requires_grad, params))\n buffer_size = sum(map(lambda x: x.numel(), trainable_params))\n bucket = torch.empty(buffer_size, dtype=params[0].dtype, 
device=device)\n offset = 0\n\n for param in trainable_params:\n offset_next = offset + param.numel()\n bucket[offset:offset_next].copy_(param.data.flatten())\n param.data = bucket[offset:offset_next].view_as(param.data)\n offset = offset_next\n\n # Either replace the existing bucket, or create it\n if len(self.buckets[device]) == dst_rank:\n self.buckets[device].append(bucket)\n else:\n self.buckets[device][dst_rank] = bucket\n else:\n self.buckets[device].append(torch.zeros(1, device=device))\n" ]
[ [ "torch.zeros", "torch.distributed.get_world_size", "torch.device", "torch.no_grad", "torch.tensor", "torch.distributed.all_reduce", "torch.distributed.get_rank", "torch.empty", "torch.distributed.distributed_c10d._get_global_rank", "torch.distributed.broadcast" ] ]
wenh06/database_reader
[ "179505ec5c21636b05e916e7b9025e6ef1f388d3" ]
[ "database_reader/image_databases/dermnet.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nimport os\nfrom typing import Union, Optional, Any, List, NoReturn\nfrom numbers import Real\n\nimport numpy as np\nnp.set_printoptions(precision=5, suppress=True)\nimport pandas as pd\n\nfrom ..utils.common import (\n ArrayLike,\n get_record_list_recursive,\n)\nfrom ..base import ImageDataBase\n\n\n__all__ = [\n \"DermNet\"\n]\n\n\nclass DermNet(ImageDataBase):\n \"\"\"\n \"\"\"\n def __init__(self, db_dir:str, working_dir:Optional[str]=None, verbose:int=2, **kwargs:Any) -> NoReturn:\n \"\"\"\n Parameters\n ----------\n db_dir: str,\n storage path of the database\n working_dir: str, optional,\n working directory, to store intermediate files and log file\n verbose: int, default 2,\n log verbosity\n kwargs: auxilliary key word arguments\n \"\"\"\n super().__init__(db_name=\"DermNet\", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)\n" ]
[ [ "numpy.set_printoptions" ] ]
guanghuixu/multi-model-forgetting
[ "d347b268124ef50ebb50b04aab76e0686c1a5819" ]
[ "main.py" ]
[ "\"\"\"Entry point.\"\"\"\nimport os\n\nimport torch\n\nimport data\nimport config\nimport utils\nimport trainer\n\nimport numpy\nimport random\n\nlogger = utils.get_logger()\n\n\ndef main(args): # pylint:disable=redefined-outer-name\n \"\"\"main: Entry point.\"\"\"\n utils.prepare_dirs(args)\n\n torch.manual_seed(args.random_seed)\n # Add this for the random seed\n numpy.random.seed(args.random_seed)\n random.seed(args.random_seed)\n torch.backends.cudnn.deterministic = True\n\n if args.num_gpu > 0:\n torch.cuda.manual_seed(args.random_seed)\n\n if args.network_type == 'rnn':\n dataset = data.text.Corpus(args.data_path)\n trnr = trainer.Trainer(args, dataset)\n elif 'cnn' in args.network_type:\n dataset = data.image.Image(args)\n trnr = trainer.CNNTrainer(args, dataset)\n else:\n raise NotImplementedError(f\"{args.dataset} is not supported\")\n\n if args.mode == 'train':\n utils.save_args(args)\n trnr.train()\n elif args.mode == 'derive':\n assert args.load_path != \"\", (\"`--load_path` should be given in \"\n \"`derive` mode\")\n trnr.derive()\n else:\n if not args.load_path:\n raise Exception(\"[!] You should specify `load_path` to load a \"\n \"pretrained model\")\n trnr.test()\n\n\nif __name__ == \"__main__\":\n args, unparsed = config.get_args()\n main(args)\n" ]
[ [ "torch.manual_seed", "torch.cuda.manual_seed", "numpy.random.seed" ] ]
kinghaoYPGE/my_python
[ "4f1368b7a1eba872ab67f8a867d81d5c71a2e9ba" ]
[ "remote_project/ershoufang_info2/pic.py" ]
[ "import numpy\nfrom matplotlib import pyplot as plt\n\nprice, size = numpy.loadtxt('houses.csv', delimiter='|', usecols=(1,2,), unpack=True)\nprint(price)\nprint(size)\n\n# 求价格和面积的平均值\nprice_mean = numpy.mean(price)\nsize_mean = numpy.mean(size)\n\nprint('平均房价: %s万'%round(price_mean, 2))\n\nplt.figure()\nplt.subplot(211)\nplt.title('/ 10000RMB')\nplt.hist(price, bins=20)\n\nplt.subplot(212)\nplt.xlabel('/ m**2')\nplt.hist(size, bins=20)\n\nplt.figure(2)\nplt.plot(price)\nplt.show()" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "numpy.mean", "matplotlib.pyplot.figure", "matplotlib.pyplot.hist", "numpy.loadtxt", "matplotlib.pyplot.show", "matplotlib.pyplot.subplot" ] ]
qwerty29544/Volume_nonStationary_acoustics
[ "5b56e0417804b659f88364f7b8abe0f4ea11a68d" ]
[ "src/GMSI_test.py" ]
[ "import numpy as np\nfrom iterations.GMSI.iter_solver import muFind, GMSI_solver\n\nif __name__ == \"__main__\":\n lambs = np.array([6., 5. + 0j, 20.])\n print(muFind(lambs))" ]
[ [ "numpy.array" ] ]
ClarkGuilty/Tesis
[ "2d0af394a8555bcfe8c977570fa584719ed7b037" ]
[ "Legacy/simPlots2D.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 11 15:55:35 2018\n\n@author: Javier Alejandro Acevedo Barroso\nScript de Python para la visualización de la simulación en 2D.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom matplotlib import rcParams\nrcParams.update({'figure.autolayout': True})\nrcParams.update({'font.size': 10})\nplt.rcParams['image.cmap'] = 'plasma'\ndpiT = 200\nfsize = 16\n\ndef fastShow(image, title=\"none\"):\n plt.figure()\n plt.imshow(image)\n plt.colorbar()\n plt.title(title)\n\ndef fmt(x, pos):\n a, b = '{:.2e}'.format(x).split('e')\n b = int(b)\n return r'${} \\times 10^{{{}}}$'.format(a, b)\n\nconstantes = np.loadtxt(\"datFiles/constants.dat\", usecols = 1)\nNt = int(constantes[-2])\nTAU = int(constantes[-1])\n\n#x = np.linspace(constantes[0], constantes[1], int(constantes[4])) \ndpi = 200\n\n#densidadTheo = np.loadtxt('./datFiles/density0.dat').T\n#potTheo = np.loadtxt('./datFiles/potential0.dat').T\n#potReal = np.loadtxt('./datFiles/potential1.dat').T\n\n\n#plt.imshow(densidadTheo)\n#plt.title(\"Densidad cumple la ecuación de poisson del potTeorico\")\n#cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))\n#plt.savefig(\"densidadTeorica.png\",dpi=dpi)\n\n#plt.figure()\n#plt.imshow(potTheo)\n#plt.title(\"potTeorico V = cos(0.5*pi*x)cos(0.5*pi*y)\")\n#cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))\n#plt.savefig(\"PotencialTeorico.png\",dpi=dpi)\n\n#plt.figure()\n#plt.imshow(potReal)\n#plt.title(\"potCalculado a partir de la densidad\")\n#cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))\n#plt.savefig(\"potReal.png\",dpi=dpi)\n\n\ndef darX(inx):\n return -1.0+2.0/128*inx\n\ndef darY(iny):\n return -1.0+2.0/128*iny\n\ndista = (int) (0.2*Nt)\nmina = (int) (Nt//2 ) - dista \nmaxa = (int) ( Nt//2+dista) \n\n\n#diff = potTheo-potReal\n#\n#plt.figure()\n#plt.imshow(diff)\n#plt.title(\"potTeorico/potCalculado\")\n#cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))\n#plt.savefig(\"potCalpotTeo.png\", dpi=dpi)\n#\n#\n#realx = np.loadtxt('./datFiles/realx.dat').T\n#realy = np.loadtxt('./datFiles/realy.dat').T\n#calcx = np.loadtxt('./datFiles/calcx.dat').T\n#calcy = np.loadtxt('./datFiles/calcy.dat').T\n#\n#fastShow(realx, title=\"realx\")\n#fastShow(realy, title=\"realy\")\n#fastShow(calcx, title=\"accex Calc - Teorica\")\n#fastShow(calcy, title=\"accey Calc - Teorica\")\n\n\n#\n \n#cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))\ndef giveDens(i,n):\n cosa = ['a','b']\n cosa[0] = './datFiles/density{:d}.dat'.format(i)\n cosa[1] = './images/density{:d}.png'.format(i)\n return cosa[n]\n#\ndef giveGridX(i,n):\n cosa = ['a','b']\n cosa[0] = './datFiles/gridx{:d}.dat'.format(i)\n cosa[1] = './images/gridx{:d}.png'.format(i)\n return cosa[n]\n#\ndef giveGridY(i,n):\n cosa = ['a','b']\n cosa[0] = './datFiles/gridy{:d}.dat'.format(i)\n cosa[1] = './images/gridy{:d}.png'.format(i)\n return cosa[n]\n#\n#\n#dpi = 300\n#\nfsize=16\ninterval = 1\ndpII = 200\nvelUnit = 1183 #m/s\nestUnit = 50 #kpc\npotUnit = 1400318153625 #J/kg\nacceUnit = 9.0761782e-13 #km/s²\ndt = 0.5\nplt.figure()\nfor i in range(0,Nt,interval):\n dens = np.loadtxt(giveDens(i,0)).T\n plt.imshow(dens,extent=[-1,1,-1,1],aspect = 'auto')\n cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))\n plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])\n plt.xlabel(\"Position [kpc]\",fontsize=fsize)\n plt.yticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])\n plt.ylabel(\"Position 
[kpc]\",fontsize=fsize)\n plt.clim(0,5e8)\n cbar.set_label(\"Density [$M_{\\odot}$ / kpc$^2$]\",fontsize=fsize)\n #plt.title(\"Density $\\\\tau =$ {:d}\".format(TAU),fontsize=fsize)\n plt.title(\"Density $t =$ {:.2f} ut\".format(i*dt),fontsize=fsize)\n #plt.title('Densidad t = %d' %(i))\n plt.savefig(giveDens(i,1),dpi=dpi)\n plt.clf()\n\n#\nfor i in range(0,Nt,interval):\n phasex = np.loadtxt(giveGridX(i,0)).T\n plt.imshow(phasex,extent=[-1,1,-1,1])\n cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))\n plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])\n plt.xlabel(\"Position [kpc]\",fontsize=fsize)\n plt.yticks(plt.xticks()[0], [str(t*velUnit) for t in plt.xticks()[0]])\n plt.ylabel(\"Velocity [km/s]\",fontsize=fsize)\n plt.title(\"Phase Space $t =$ {:.2f} ut\".format(i*dt),fontsize=fsize)\n cbar.set_label(\"Phase space density [$M_{\\odot}$ / (kpc $\\\\frac{km}{s}$)$^2$]\",fontsize=fsize-2)\n plt.savefig(giveGridX(i,1),dpi=dpi)\n plt.clf()\n\n\n#\n#for i in range(0,Nt,interval):\n# phasey = np.loadtxt(giveGridY(i,0)).T\n# plt.imshow(phasey,extent=[-1,1,-1,1])\n# cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))\n# plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])\n# plt.xlabel(\"Position [kpc]\",fontsize=fsize)\n# plt.yticks(plt.xticks()[0], [str(t*velUnit) for t in plt.xticks()[0]])\n# plt.ylabel(\"Velocity [km/s]\",fontsize=fsize)\n# plt.title(\"Phase space $t =$ {:.2f} ut\".format(i*dt),fontsize=fsize)\n# #plt.clim(0,1e-4)\n# cbar.set_label(\"Density [$M_{\\odot}$ / kpc$^2$]\",fontsize=fsize)\n# plt.savefig(giveGridY(i,1),dpi=dpi)\n# plt.clf()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.colorbar", "matplotlib.pyplot.clim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.loadtxt", "matplotlib.rcParams.update", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "matplotlib.ticker.FuncFormatter", "matplotlib.pyplot.xticks", "matplotlib.pyplot.imshow" ] ]
OmegaZhou/MAgent
[ "65fd6fe82535a9502b3be1dcea692a183bdaa6f9" ]
[ "examples/train_trans.py" ]
[ "\"\"\"\ntrain agents to walk through some walls, avoiding collide\n\"\"\"\n\nimport argparse\nimport time\nimport os\nimport logging as log\nimport math\nimport random\n\nimport numpy as np\n\nimport magent\nfrom magent.builtin.tf_model import DeepQNetwork, DeepRecurrentQNetwork\n\n\ndef get_config(map_size):\n gw = magent.gridworld\n cfg = gw.Config()\n\n cfg.set({\"map_width\": map_size * 2, \"map_height\": map_size})\n cfg.set({\"minimap_mode\": True})\n cfg.set({\"embedding_size\": 10})\n\n agent = cfg.register_agent_type(\n \"agent\",\n {'width': 1, 'length': 1, 'hp': 10, 'speed': 1,\n 'view_range': gw.CircleRange(6),\n 'damage': 2, 'step_recover': 0.1,\n\n 'step_reward': -1,\n })\n\n g0 = cfg.add_group(agent)\n\n return cfg\n\n\nleftID, rightID = 0, 1\ndef generate_map(env, map_size, handles):\n \"\"\" generate a map, which consists of two squares of agents and vertical lines\"\"\"\n width = map_size * 2\n height = map_size\n margin = map_size * 0.1\n line_num = 9\n wall_width = 4\n gap = 2\n road_height = 2\n road_num = 4\n init_num = margin * height * 0.8\n\n def random_add(x1, x2, y1, y2, n):\n added = set()\n ct = 0\n while ct < n:\n x = random.randint(x1, x2)\n y = random.randint(y1, y2)\n\n next = (x, y)\n if next in added:\n continue\n added.add(next)\n ct += 1\n return list(added)\n\n # left\n pos = random_add(0, margin, 0, height, init_num)\n env.add_agents(handles[leftID], method=\"custom\", pos=pos)\n\n # right\n # pos = random_add(width - margin, width, 0, height, init_num)\n # env.add_agents(handles[rightID], method=\"custom\", pos=pos)\n\n # wall\n lines = set()\n low, high = margin * 2 + wall_width, width - margin * 2 - wall_width\n ct = 0\n while ct < line_num:\n next = random.randint(low, high)\n collide = False\n for j in range(-wall_width - gap, wall_width+gap + 1):\n if next+j in lines:\n collide = True\n break\n\n if collide:\n continue\n lines.add(next)\n ct += 1\n\n lines = list(lines)\n walls = []\n for item in lines:\n road_skip = set()\n for i in range(road_num):\n road_start = random.randint(1, height-1 - road_height)\n for j in range(road_height):\n road_skip.add(road_start + j)\n\n for i in range(height):\n if i in road_skip:\n continue\n for j in range(-wall_width//2, wall_width//2 + 1):\n walls.append((item+j, i))\n\n env.add_walls(method=\"custom\", pos=walls)\n\n\n\ndef play_a_round(env, map_size, handles, models, print_every, train=True, render=False, eps=None):\n env.reset()\n generate_map(env, map_size, handles)\n\n step_ct = 0\n done = False\n\n n = len(handles)\n obs = [[] for _ in range(n)]\n ids = [[] for _ in range(n)]\n acts = [[] for _ in range(n)]\n nums = [env.get_num(handle) for handle in handles]\n sample_buffer = magent.utility.EpisodesBuffer(capacity=1000)\n total_reward = [0 for _ in range(n)]\n\n print(\"===== sample =====\")\n print(\"eps %.2f number %s\" % (eps, nums))\n start_time = time.time()\n while not done:\n # take actions for every model\n for i in range(n):\n obs[i] = env.get_observation(handles[i])\n ids[i] = env.get_agent_id(handles[i])\n acts[i] = models[i].infer_action(obs[i], ids[i], 'e_greedy', eps=eps)\n env.set_action(handles[i], acts[i])\n\n # simulate one step\n done = env.step()\n\n # sample\n step_reward = []\n for i in range(n):\n rewards = env.get_reward(handles[i])\n if train:\n alives = env.get_alive(handles[i])\n sample_buffer.record_step(ids[i], obs[i], acts[i], rewards, alives)\n s = sum(rewards)\n step_reward.append(s)\n total_reward[i] += s\n\n # render\n if render:\n env.render()\n\n # stat info\n 
nums = [env.get_num(handle) for handle in handles]\n\n # clear dead agents\n env.clear_dead()\n\n if step_ct % print_every == 0:\n print(\"step %3d, nums: %s reward: %s, total_reward: %s \" %\n (step_ct, nums, np.around(step_reward, 2), np.around(total_reward, 2)))\n step_ct += 1\n if step_ct > 550:\n break\n\n sample_time = time.time() - start_time\n print(\"steps: %d, total time: %.2f, step average %.2f\" % (step_ct, sample_time, sample_time / step_ct))\n\n # train\n total_loss, value = 0, 0\n if train:\n print(\"===== train =====\")\n start_time = time.time()\n total_loss, value = models[0].train(sample_buffer, 500)\n train_time = time.time() - start_time\n print(\"train_time %.2f\" % train_time)\n\n def round_list(l): return [round(x, 2) for x in l]\n return total_loss, nums, round_list(total_reward), value\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--save_every\", type=int, default=5)\n parser.add_argument(\"--render_every\", type=int, default=10)\n parser.add_argument(\"--n_round\", type=int, default=2000)\n parser.add_argument(\"--render\", action=\"store_true\")\n parser.add_argument(\"--load_from\", type=int)\n parser.add_argument(\"--train\", action=\"store_true\")\n parser.add_argument(\"--map_size\", type=int, default=60)\n parser.add_argument(\"--greedy\", action=\"store_true\")\n parser.add_argument(\"--name\", type=str, default=\"battle\")\n parser.add_argument(\"--eval\", action=\"store_true\")\n parser.add_argument('--alg', default='dqn', choices=['dqn', 'drqn', 'a2c'])\n args = parser.parse_args()\n\n # set logger\n log.basicConfig(level=log.INFO, filename=args.name + '.log')\n console = log.StreamHandler()\n console.setLevel(log.INFO)\n log.getLogger('').addHandler(console)\n\n # init the game\n env = magent.GridWorld(get_config(args.map_size))\n env.set_render_dir(\"build/render\")\n\n # two groups of agents\n names = [args.name + \"-l\", args.name + \"-r\"]\n handles = env.get_handles()\n\n # sample eval observation set\n eval_obs = None\n if args.eval:\n print(\"sample eval set...\")\n env.reset()\n generate_map(env, args.map_size, handles)\n eval_obs = magent.utility.sample_observation(env, handles, 2048, 500)[0]\n\n # init models\n batch_size = 256\n unroll_step = 8\n target_update = 1000\n train_freq = 5\n\n models = []\n if args.alg == 'dqn':\n models.append(DeepQNetwork(env, handles[0], \"selfplay\",\n batch_size=batch_size,\n memory_size=2 ** 20, target_update=target_update,\n train_freq=train_freq, eval_obs=eval_obs))\n elif args.alg == 'drqn':\n models.append(DeepRecurrentQNetwork(env, handles[0], \"selfplay\",\n batch_size=batch_size/unroll_step, unroll_step=unroll_step,\n memory_size=2 * 8 * 625, target_update=target_update,\n train_freq=train_freq, eval_obs=eval_obs))\n else:\n raise NotImplementedError\n\n models.append(models[0])\n\n # load if\n savedir = 'save_model'\n if args.load_from is not None:\n start_from = args.load_from\n print(\"load ... 
%d\" % start_from)\n for model in models:\n model.load(savedir, start_from)\n else:\n start_from = 0\n\n # print debug info\n print(args)\n print(\"view_space\", env.get_view_space(handles[0]))\n print(\"feature_space\", env.get_feature_space(handles[0]))\n\n # play\n start = time.time()\n for k in range(start_from, start_from + args.n_round):\n tic = time.time()\n eps = magent.utility.piecewise_decay(k, [0, 700, 1400], [1, 0.2, 0.05]) if not args.greedy else 0\n loss, num, reward, value = play_a_round(env, args.map_size, handles, models,\n train=args.train, print_every=50,\n render=args.render or (k+1) % args.render_every == 0,\n eps=eps) # for e-greedy\n\n log.info(\"round %d\\t loss: %s\\t num: %s\\t reward: %s\\t value: %s\" % (k, loss, num, reward, value))\n print(\"round time %.2f total time %.2f\\n\" % (time.time() - tic, time.time() - start))\n\n # save models\n if (k + 1) % args.save_every == 0 and args.train:\n print(\"save model... \")\n for model in models:\n model.save(savedir, k)\n" ]
[ [ "numpy.around" ] ]
tvst/streamlit-forum-metrics
[ "d7bac10b13c9605d78f79e3b54eeea678d546570" ]
[ "discourse_api.py" ]
[ "from urllib.parse import urljoin, urlencode\nimport datetime\nimport pytz\n\nimport pandas as pd\nimport requests\n\nimport streamlit as st\n\n\nBASE_URL = 'https://discuss.streamlit.io'\nTTL = 60 * 60 # 1 hour\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef fetch(path, **query):\n url = urljoin(BASE_URL, path)\n if query:\n query_str = urlencode(query)\n url = \"%s?%s\" % (url, query_str)\n return requests.get(url)\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef fetch_categories():\n resp = fetch('categories.json')\n data = resp.json()\n cat_data = data['category_list']['categories']\n table = get_categories_as_table(cat_data)\n table.set_index('name', inplace=True)\n return table\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef fetch_categories_dict():\n resp = fetch('categories.json')\n data = resp.json()\n cat_data = data['category_list']['categories']\n return get_categories_as_dict(cat_data)\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef fetch_page_of_latest_posts(page=0):\n resp = fetch('posts.json', page=page)\n data = resp.json()\n post_data = data['latest_posts']\n return get_post_data_as_table(post_data)\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef fetch_page_of_latest_topics(page=0):\n resp = fetch('latest.json', page=page)\n data = resp.json()\n topics_data = data['topic_list']['topics']\n return get_topics_data_as_table(topics_data)\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef fetch_latest_topics_by_timedelta(**kwargs):\n now = datetime.datetime.now(tz=pytz.UTC)\n timedelta = datetime.timedelta(**kwargs)\n threshold_date = now - timedelta\n\n posts_list = []\n page = 0\n\n while True:\n batched_posts = fetch_page_of_latest_topics(page)\n\n # Remove posts more than 7 days old.\n batched_posts = batched_posts.loc[\n batched_posts['last_posted_at'] > threshold_date]\n\n if batched_posts.empty:\n break\n\n posts_list.append(batched_posts)\n page += 1\n\n return pd.concat(posts_list)\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef get_categories_as_table(cat_data):\n table = pd.DataFrame(cat_data)\n return table[[\n 'id',\n 'name',\n 'topic_count',\n 'post_count',\n 'topics_day',\n 'topics_week',\n 'topics_month',\n 'topics_year',\n 'topics_all_time',\n ]]\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef get_categories_as_dict(cat_data):\n return {d['id']: d['name'] for d in cat_data}\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef get_post_data_as_table(post_data):\n table = pd.DataFrame(post_data)\n table['created_at'] = pd.to_datetime(table['created_at'])\n return table[[\n 'id',\n 'display_username',\n 'created_at',\n 'raw',\n 'staff',\n 'reads',\n 'post_number',\n ]]\n\n\[email protected](ttl=TTL, show_spinner=False)\ndef get_topics_data_as_table(topics_data):\n table = pd.DataFrame(topics_data)\n table['created_at'] = pd.to_datetime(table['created_at'])\n table['last_posted_at'] = pd.to_datetime(table['last_posted_at'])\n\n categories = fetch_categories_dict()\n table['category'] = table['category_id'].map(categories)\n\n return table[[\n 'title',\n 'last_posted_at',\n 'created_at',\n 'category',\n 'views',\n 'posts_count',\n 'like_count',\n ]]\n\n\[email protected](ttl=TTL, show_spinner=False)\[email protected](show_spinner=False)\ndef to_date(json_date):\n if json_date.endswith('Z'):\n json_date = json_date[:-1]\n return datetime.datetime.fromisoformat(json_date)\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.concat" ] ]
williamdjones/cv_assignment_4
[ "3d654fb6b079e7c5b68c0ca545c28f29f0500a1b" ]
[ "source/utils.py" ]
[ "import h5py\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\n\ndef get_codebook_features_labels(path,codebook,sample_size):\n train_data = pd.read_csv(path)\n\n if sample_size == None:\n sample_size = train_data.shape[0]\n\n # select a subset of the data of size sample_size to make the computation more efficient\n train_data = train_data.sample(sample_size)\n\n labels = train_data[\"classid\"]\n labels = labels.as_matrix()\n\n features = np.zeros([train_data.shape[0],codebook.cluster_centers_.shape[0]])\n\n i = 0\n for filepath in train_data[\"filename\"]:\n feats_i = load_surf_features(filepath)\n feats_i = np.transpose(feats_i)\n\n hgram_i = np.zeros([1,codebook.cluster_centers_.shape[0]])\n\n for feat in feats_i:\n f_i = codebook.predict([feat])\n hgram_i[0,f_i] +=1\n\n hgram_i = np.divide(hgram_i,float(feats_i.shape[0]))\n\n features[i] = hgram_i #try features[i,:] = hgram_i\n i += 1\n\n features = np.asarray(features)\n\n for i in xrange(0,labels.shape[0]):\n labels[i] = labels[i] - 1\n\n return features,labels\n\n\ndef get_alex_feats_labels(path,sample_size):\n train_data = pd.read_csv(path)\n\n if sample_size == None:\n sample_size = train_data.shape[0]\n # in order to decrease computation burden, select a random sample of the rows equivalent of size sample_size\n train_data = train_data.sample(sample_size)\n\n labels = train_data[\"classid\"]\n labels = labels.as_matrix()\n\n i = 0\n features = np.zeros([sample_size,4096])\n\n for filepath in train_data[\"filename\"]:\n feats_i = load_alex_net_image_features(filepath)\n feats_i = np.ndarray.flatten(feats_i)\n features[i] = feats_i\n i+=1\n\n for i in xrange(0,labels.shape[0]):\n labels[i] = labels[i] - 1\n\n return features,labels\n\n\ndef make_codebook(path,size,num_words):\n data = pd.read_csv(path)\n\n codebook_feats = []\n for filepath in data[\"filename\"]:\n feats_i = load_surf_features(filepath)\n feats_i = np.transpose(feats_i)\n\n if feats_i.shape[0] >= num_words: # throw out samples with smaller number of features than the min number of features we would like to extract\n rand_indices = np.unique(np.random.choice(feats_i.shape[0],num_words,replace=False))\n for index in rand_indices:\n codebook_feats.append(feats_i[index])\n\n codebook_feats = np.asarray(codebook_feats)\n cluster = KMeans(n_clusters=size,max_iter=300)\n cluster.fit(codebook_feats)\n\n return cluster\n\n\ndef load_surf_features(path):\n path = path+\".h5\"\n image_features = h5py.File(path)\n image_features = image_features.values()\n image_features[1] = np.array(image_features[1])\n image_features = image_features[1]\n\n return image_features\n\n\ndef load_alex_net_image_features(path):\n path = path +\".h5\"\n image_features = h5py.File(path)\n image_features = image_features.values()\n image_features[0] = np.array(image_features[0])\n image_features = image_features[0]\n\n return image_features\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45,fontsize=2)\n plt.yticks(tick_marks, classes,fontsize=2)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n 
print('Confusion matrix, without normalization')\n\n print(cm)\n\n #thresh = cm.max() / 2.\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n" ]
[ [ "numpy.array", "matplotlib.pyplot.colorbar", "numpy.random.choice", "numpy.asarray", "numpy.zeros", "matplotlib.pyplot.xlabel", "sklearn.cluster.KMeans", "matplotlib.pyplot.title", "matplotlib.pyplot.yticks", "numpy.transpose", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "numpy.ndarray.flatten", "pandas.read_csv", "matplotlib.pyplot.xticks", "matplotlib.pyplot.imshow" ] ]
IrvingGomez/academic-hugo
[ "4f6e4ec4aab7a11f477883441b768bb6cf843a9c" ]
[ "content/courses/mod2021/17_logistic_regression_heart_disease.py" ]
[ "#########################\n## ##\n## Irving Gomez Mendez ##\n## May 04, 2021 ##\n## ##\n#########################\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\n\n# Data from Hosmer, D.W. & Lemeshow, S. (1989) Applied logistic regression. Wiley\n# Data are ages (in years) and indicator of significant damage in the coronary of 100 people\ndat = pd.read_csv('Heart_Disease_vs_Age.csv')\nn = dat.shape[0]\nX = np.vstack([np.ones(n), dat['age']]).T\ny = dat['chd']\n\n# Logistic regression by hand\nb = [0,0] # initial values\n\ntolm = 1e-6 # tolerance (minimum norm of the difference of the betas)\niterm = 100 # maximum number of iterations\ntolera = 1 # initialize tolera\nitera = 0 # initialize ittera\nhisto = b # initialize beta upgrade\n\nwhile((tolera > tolm) and (itera < iterm)):\n p = 1/(1+np.exp(-X @ b))\n W = np.diag(p*(1-p))\n delta = np.linalg.solve(X.T @ W @ X, X.T @ (y-p))\n b = b+delta\n tolera = np.sqrt(sum(delta**2))\n histo = np.vstack([histo, b])\n itera = itera+1\n\nhisto\n\nn0 = 50\nx0 = np.linspace(dat['age'].min(), dat['age'].max(), n0)\nX0 = np.vstack([np.ones(n0), x0]).T\n\nplt.figure(figsize=(10,7.5))\nplt.plot(dat['age'], dat['chd'], 'o')\nplt.plot(x0, 1/(1+np.exp(-X0 @ b)), 'r'),\nplt.xlabel('Age')\nplt.ylabel('Prob. of Coronary Heart Disease')\n\n# Using LogisticRegression\nlogreg = LogisticRegression(penalty='none')\nlogreg.fit(np.array(dat['age'])[:,None],y)\n\nb_star = logreg.coef_[0]\nb_star\nlogreg.intercept_\n\nplt.figure(figsize=(10,7.5))\nplt.plot(dat['age'], dat['chd'], 'o')\nplt.plot(x0, (logreg.predict_proba(x0[:,None]).T)[1,], 'r'),\nplt.xlabel('Age')\nplt.ylabel('Prob. of Coronary Heart Disease')\n\n###\n" ]
[ [ "numpy.array", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.ones", "numpy.exp", "matplotlib.pyplot.figure", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.ylabel", "numpy.linalg.solve", "pandas.read_csv", "numpy.diag", "numpy.vstack" ] ]
eranbTAU/Closing-the-Reality-Gap-for-a-Multi-Agent-System-Using-GAN
[ "3df5f8ba1069ce3f16f1ab743da9cbdd3bddd43c" ]
[ "Fish/carnet/data/gan_trajectories.py" ]
[ "import logging\r\nimport os\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\nfrom torch.utils.data import Dataset\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\ndef standardization(pred_seq, real_seq):\r\n x, y = pred_seq, real_seq\r\n meansx, stdsx = x.mean(axis=0), x.std(axis=0)\r\n meansy, stdsy = y.mean(axis=0), y.std(axis=0)\r\n x_stand = (x - meansx) / (1e-7 + stdsx)\r\n y_stand = (y - meansy) / (1e-7 + stdsy)\r\n x_stand = torch.from_numpy(x_stand).type(torch.float)\r\n y_stand = torch.from_numpy(y_stand).type(torch.float)\r\n return x_stand, y_stand\r\n\r\ndef Normalization(x, x_min, x_max):\r\n x_nor = (x - x_min) / (x_max - x_min)\r\n x_nor = torch.from_numpy(x_nor).type(torch.float)\r\n return x_nor\r\n\r\ndef CarNormalization(pred_seq, real_seq, x_min=-120, x_max=120, dx_min=-21.9717, dx_max=21.9717,\r\n dy_min=-44.9300, dy_max=44.9875, dtheta_min=-1.3000, dtheta_max=1.2928\r\n):\r\n x_stand = Normalization(pred_seq, x_min, x_max)\r\n y_dx = Normalization(real_seq[:,0], dx_min, dx_max).view(-1,1)\r\n y_dy = Normalization(real_seq[:,1], dy_min, dy_max).view(-1,1)\r\n y_dtheta = Normalization(real_seq[:,2], dtheta_min, dtheta_max).view(-1,1)\r\n y_stand = torch.cat([y_dx, y_dy, y_dtheta], dim=1)\r\n return x_stand, y_stand\r\n\r\ndef seq_collate_gan(data):\r\n (pred_seq, real_seq) = zip(*data)\r\n pred_seq = np.asarray([t.numpy() for t in pred_seq])\r\n real_seq = np.asarray([t.numpy() for t in real_seq])\r\n\r\n # Normalize the data\r\n pred_seq_stand, real_seq_stand = CarNormalization(pred_seq, real_seq)\r\n out = [\r\n pred_seq_stand, real_seq_stand]\r\n return tuple(out)\r\n\r\nclass TrajectoryDataset(Dataset):\r\n def __init__(self, data_dir):\r\n super(TrajectoryDataset, self).__init__()\r\n\r\n self.data_dir = data_dir\r\n all_files = os.listdir(self.data_dir)\r\n all_files = [os.path.join(self.data_dir, _path) for _path in all_files]\r\n Dtrain = []\r\n for path in all_files:\r\n data = pickle.load(file=open(path, \"rb\"))\r\n Dtrain += data\r\n\r\n action, state = Dtrain\r\n # Convert numpy -> Torch Tensor\r\n self.action_x = torch.from_numpy(\r\n action).type(torch.float)\r\n self.state_y = torch.from_numpy(\r\n state).type(torch.float)\r\n self.num_samples = len(action)\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n\r\n def __getitem__(self, index):\r\n out = [\r\n self.action_x[index], self.state_y[index]\r\n ]\r\n return out\r\n\r\n\r\nclass Datasets(Dataset):\r\n def __init__(self, data_dir, validation):\r\n super(Datasets, self).__init__()\r\n\r\n if not validation:\r\n self.data_dir = data_dir\r\n all_files = os.listdir(self.data_dir)\r\n all_files = [os.path.join(self.data_dir, _path) for _path in all_files]\r\n for path in all_files:\r\n data = pickle.load(file=open(path, \"rb\"))\r\n data = np.array(data)\r\n action, state = data[:, :2], data[:, 2:]\r\n else:\r\n data = pickle.load(file=open(data_dir, \"rb\"))\r\n action, state = data[0], data[1]\r\n\r\n\r\n\r\n # Convert numpy -> Torch Tensor\r\n self.action_x = torch.from_numpy(\r\n action).type(torch.float)\r\n self.state_y = torch.from_numpy(\r\n state).type(torch.float)\r\n self.num_samples = len(action)\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n\r\n def __getitem__(self, index):\r\n out = [\r\n self.action_x[index], self.state_y[index]\r\n ]\r\n return out" ]
[ [ "numpy.array", "torch.cat", "torch.from_numpy" ] ]
p2pquake/eew-detector
[ "0e9221c04c4c4ab051329ee117624abe1d841987" ]
[ "fft_analyze.py" ]
[ "import os\nimport sys\nimport wave\nimport numpy as np\n\n# ch: 1\n# bit: 8\n# sample rate: 8000\n\n# FFT window\nN = 1024\n\n# Keep \"detected\" status\nFREEZE_COUNT = int(8000 / N * 10)\n\n# -----------------------------------\n# Initialize answer data\n# -----------------------------------\ndef load_wave(filename):\n array = []\n with wave.open(filename, mode='rb') as f:\n while True:\n data = np.frombuffer(f.readframes(N), dtype='b')\n if len(data) < N:\n break\n fft_data = np.fft.fft(data)\n fft_abs = np.abs(fft_data)\n array.append(fft_abs)\n return np.array(array)\n\nfft_chime = load_wave('./baseFFTs.wav')\nfft_voice = load_wave('./baseAnnFFT3.wav')\n\n# print(len(fft_chime))\n# print(len(fft_voice))\nfft_size = max(len(fft_chime), len(fft_voice))\n# print(fft_size)\n\n# np.set_printoptions(precision=0)\n# np.set_printoptions(suppress=True)\n# np.set_printoptions(linewidth=480)\n\n\n# -----------------------------------\n# Calculate score\n# -----------------------------------\ndef calc_score(fft_chime, ring_fft_buffers, index):\n ring_size = len(ring_fft_buffers)\n chime_size = len(fft_chime)\n start_index = (index + (ring_size - chime_size) + 1) % ring_size \n diff = 0\n for var in range(0, chime_size):\n #diff += np.linalg.norm(np.take(fft_chime, var, axis=0) - np.take(ring_fft_buffers, (index+var)%ring_size, axis=0))\n diff += np.linalg.norm(np.take(fft_chime, var, axis=0) - ring_fft_buffers[(start_index+var)%ring_size])\n return diff\n\n# -----------------------------------\n# Read infinite\n# -----------------------------------\nread_pipe = os.fdopen(sys.stdin.fileno(), 'rb', buffering=N)\n\nindex = 0\nring_fft_buffers = [0] * fft_size # np.zeros((fft_size, N))\n\nchime_remain = 0\nvoice_remain = 0\n\nwhile True:\n data = np.frombuffer(read_pipe.read(N), dtype='b')\n fft_data = np.fft.fft(data)\n fft_abs = np.abs(fft_data)\n ring_fft_buffers[index] = fft_abs\n #print(index)\n if chime_remain == 0:\n score = calc_score(fft_chime, ring_fft_buffers, index)\n print(score)\n if score < 1000000:\n print(\"*** EEW chime detected! ***\")\n chime_remain = FREEZE_COUNT\n elif voice_remain == 0:\n score = calc_score(fft_voice, ring_fft_buffers, index)\n print(score)\n if score < 600000:\n print(\"*** EEW voice detected! ***\")\n voice_remain = FREEZE_COUNT\n\n if chime_remain > 0:\n chime_remain -= 1\n if voice_remain > 0:\n voice_remain -= 1\n\n index = (index + 1) % fft_size\n\n\n" ]
[ [ "numpy.array", "numpy.fft.fft", "numpy.abs", "numpy.take" ] ]
LucaCamerani/EcoFin-library
[ "ad8d628e0d447d1b5e8d3b16610d382e7df086e1", "ad8d628e0d447d1b5e8d3b16610d382e7df086e1" ]
[ "Tesi/2_indicatorsModelling/4_noArbitrageReturn/noArbitragePrice.py", "Tesi/3_modelTester/4_portfolioTester.py" ]
[ "\"\"\"\n4_noArbitragePrice.py\n\nCreated by Luca Camerani at 22/11/2020, University of Milano-Bicocca.\n([email protected])\nAll rights reserved.\n\nThis file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),\nand is released under the \"BSD Open Source License\".\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom EcoFin.dataDownload.optionsManager import OptionManager\nfrom EcoFin.dataDownload.ticker import Ticker\nfrom EcoFin.options.deepOptionChain import DeepOptionChain\n\n# -------------------------[Set-up]-------------------------\nticker = Ticker('MSFT')\noptionManager = OptionManager(ticker, now=1492646400)\nexp = optionManager.getExpirationByMaturity(30, method='greater')\noptionChain = optionManager.getOptionChain(exp=exp)\n# ----------------------------------------------------------\n\nticker_info = ticker.getInfo()\nforwardPrice = optionChain.getForwardPrice()\n\ndeepOptionChain = DeepOptionChain(optionChain)\ndata = deepOptionChain.getDeepOptionChain()\n\ndata['weights'] = data.loc[:, ['openInterest_call', 'openInterest_put']].sum(axis=1) / \\\n np.nansum(data.loc[:, ['openInterest_call', 'openInterest_put']].to_numpy())\n\nhistory_back = 100\nhistory = ticker.getHistory(end=optionChain.getChainDate()).tail(history_back)\n\ndata['S_mean'] = np.mean(data[['S_U', 'S_L']], axis=1)\nforecast = np.mean(data[['S_U', 'S_L', 'S_mean']])\n\nfig, axs = plt.subplots(3, figsize=(15, 8))\nfig.suptitle('No-arbitrage price bounds ({})'.format(ticker_info.longName), fontsize=16)\n\n# chart 1\naxs[0].set_title('No arbitrage price bounds')\naxs[0].plot(data.strike, data['S_U'], label='Upper bound ($S^{U})_{now}$)', color='green')\naxs[0].plot(data.strike, data['S_L'], label='Lower bound ($S^{L})_{now}$)', color='red')\n\naxs[0].plot(data.strike, data['S_mean'], label='Price AVG ($S{AVG})_{now}$)', linewidth=3, linestyle=\"dotted\")\nbounds = data[['S_U', 'S_L']]\naxs[0].plot(forwardPrice, np.nanmin(bounds), markersize=8, marker='^', color='violet')\naxs[0].vlines(forwardPrice, np.nanmin(bounds), np.nanmax(bounds),\n linestyles='dashed', color='violet', alpha=.6, label='Forward Price')\naxs[0].legend()\naxs[0].grid()\n\n# chart 2\naxs[1].set_title('Market underlying price forecast')\naxs[1].plot(history.index, history.Close, label='Underlying price ($S_t$)')\n\nfunnel = pd.DataFrame({'S_U': [history.tail(1).Close.values[0], forecast['S_U']],\n 'S_mean': [history.tail(1).Close.values[0], forecast['S_mean']],\n 'S_L': [history.tail(1).Close.values[0], forecast['S_L']]},\n index=[history.tail(1).index.values[0], np.datetime64(datetime.utcfromtimestamp(exp))])\n\naxs[1].plot(funnel.index, funnel['S_U'], color='green', linestyle=\"dotted\", label='$S^{U}$')\naxs[1].plot(funnel.index, funnel['S_mean'], color='blue', label='$S^{AVG}$')\naxs[1].plot(funnel.index, funnel['S_L'], color='red', linestyle=\"dotted\", label='$S^{L}$')\naxs[1].plot(datetime.utcfromtimestamp(exp), forecast['S_mean'], '<')\n\naxs[1].legend()\naxs[1].grid()\n\n# chart 3\nlines = []\naxs[2].set_title('Weights')\nlines.append(axs[2].plot(data.strike, np.abs(data.moneyness), label='$Moneyness$')[0])\nlines.append(axs[2].vlines(forwardPrice, 0, np.nanmax(data.moneyness),\n linestyles=\"dashed\", color='violet', alpha=.6, label='Forward Price'))\naxs[2].set(xlabel='Strike')\naxs[2].grid()\n\nax_bis = axs[2]\nlines.append(ax_bis.bar(data.strike, data['openInterest_call'],\n label='Open Interest (Call)', color='green', 
alpha=.3))\nlines.append(ax_bis.bar(data.strike, data['openInterest_put'],\n label='Open Interest (Put)', color='red', alpha=.3))\n\nax_ter = axs[2].twinx()\nlines.append(ax_ter.plot(data.strike, data['weights'],\n label='Weights', color='blue', alpha=.3)[0])\n\naxs[2].legend(lines, [l.get_label() for l in lines], loc=0)\n\nplt.figtext(.9, 0.02, \"{} | {}\".format(optionChain.getChainDate(), optionChain.getChainExpiration()),\n ha=\"right\", fontsize=10, bbox={\"facecolor\": \"orange\", \"alpha\": 0.2, \"pad\": 8})\nplt.show()\n\nprint('Price: {}'.format(forecast['S_mean']))\n\n# ----------------------[EXPORT BLOCK]--------------------------------\npath = '../Export/[{}]_({})'.format(ticker.ticker, ticker_info.longName)\nif not os.path.exists(path):\n os.makedirs(path)\n\nfig.savefig('{}/noArbitrageBounds_[{}].png'.format(path, exp))\n# ----------------------[EXPORT BLOCK]--------------------------------\n", "\"\"\"\n4_portfolioTester.py\n\nCreated by Luca Camerani at 10/02/2021, University of Milano-Bicocca.\n([email protected])\nAll rights reserved.\n\nThis file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),\nand is released under the \"BSD Open Source License\".\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom EcoFin.assetAllocation.performance import Performance\nfrom EcoFin.utils import utils\nfrom EcoFin.assetAllocation.allocation import Allocation\n\n\n# -------------------------[Set-up]-------------------------\nticker_list = [line.rstrip('\\n') for line in open(r'../INDEXs/DJIA.txt')]\nmaturity_min = 15\n\nbase_path = r'../Export/BackTest_C'\nstart_date = 0\n\n# Strategy set-up\ndirection = 'OPS_[OI]' # Direction driver\nforce = 'VIX_[CBOE]' # In None, don't use force driver\npolarize = True # True or False: polarize direction component\n\n# Portfolio set-up\nbuy_only = True # Set a buy only strategy that ignore negative signals\nw_limit = 5 # Ranks best N ticker based on strategy\nw_equally = True # Equally weighted mode\nleverage = None # Strategy leverage (1 is no leverage, None is auto-compensation)\n\n# Transaction costs\ntc = 8 # unit in basis points\n# ----------------------------------------------------------\n\nbase = ['SpotPrice']\ndata = {b: {} for b in base + [direction, force]}\nif None in data.keys():\n del data[None]\n\nfor tick in tqdm(ticker_list, desc='Importing data'):\n try:\n # Import data and clean-up\n source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl')\n source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')]\n source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True)\n\n for driver in data.keys():\n data[driver][tick] = source[driver]\n except:\n pass\n\n# Merge (concatenate) data and create dataframes\nfor driver in data.keys():\n data[driver] = pd.concat(data[driver], axis=1)\n\n # ❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌[Normalize direction data]❌❌❌❌❌❌❌❌❌❌❌\n if driver == direction:\n data[driver] = data[driver].sub(data[driver].mean(axis=1), axis=0)\n # ❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌❌\n\n# Generate strategy signals\n# -----------------------------------[STRATEGY SET-UP]-----------------------------------\nif polarize: #\n data[direction] = utils.polarizeTable(data[direction]) #\n #\nif force is None: #\n force_v = 1 #\nelse: #\n force_v = data[force] #\n #\ndata['signals'] = data[direction] * force_v #\n# 
-----------------------------------[STRATEGY SET-UP]-----------------------------------\n\n# =====================================================================================\n# FROM HERE NO 'signals data' MANIPULATION\n# =====================================================================================\n\n# [1] Compute ln-returns of benchmark\ndata['lnReturns'] = np.log(data['SpotPrice'].shift(-1) / data['SpotPrice'])\n\n# [2] Compute strategy weights\nallocation = Allocation(data['signals'], buyOnly=buy_only, limit=w_limit)\nif w_equally:\n data['weights'] = allocation.getEquallyWeights()\nelse:\n data['weights'] = allocation.getSignalWeights()\n\n# [3] Compute strategy ln-returns\nif leverage is None:\n leverage = data['SpotPrice'].shape[1]\ndata['strategy'] = data['lnReturns'] * data['weights'] * leverage\n\n# [4] Compute turnover and transaction costs\nturnover = allocation.getTurnover(data['weights'])\ndata['costs'] = np.log(turnover.byTime * 2 * (tc/1e4) + 1)\ndata['strategy_net'] = data['strategy'].mean(axis=1) - data['costs']\n\n# Compute and print portfolio metrics\nperformance = Performance(data['lnReturns'].mean(axis=1), {'St.gy (Gross)': data['strategy'].mean(axis=1),\n 'St.gy (Net)': data['strategy_net']}, r=0.019)\nperformance.printPerformanceSummary()\nprint('\\n\\033[1mTurnover avg.:\\033[0m {0:.2%}'.format(allocation.getTurnover(data['weights']).mean))\n\n# =====================================================================================\n# FROM HERE NO DATA MANIPULATION\n# =====================================================================================\n\n# Create plot framework\nfig, axs = plt.subplots(2, figsize=(15, 8), sharex=True)\nfig.suptitle('Strategy tester', fontsize=16)\n\n# Plot strategy return vs. benchmark (data)\naxs[0].set_title('data returns')\naxs[0].plot(data['lnReturns'].mean(axis=1).cumsum(), linestyle='dotted', label='Benchmark')\naxs[0].plot(data['strategy'].mean(axis=1).cumsum(), label=r'$Strategy_{GROSS}$')\naxs[0].plot(data['strategy_net'].cumsum(), label=r'$Strategy_{NET}$')\naxs[0].set(ylabel='Cumulated ln-returns ($X_t$)')\naxs[0].legend()\n\n# Plot transaction costs\nax2 = axs[0].twinx()\ncolor = 'tab:gray'\nax2.set_ylabel('Transaction Costs', color=color)\nax2.fill_between(data['costs'].index, 0, data['costs'], linewidth=.5, alpha=.2, color=color)\nax2.plot(data['costs'], linewidth=.5, alpha=.6, color=color)\nax2.set_ylim([0, data['costs'].max() * 4])\nax2.get_yaxis().set_ticks([])\n\n# Plot evolution of weights\npositive = data['weights'][data['weights'] >= 0].fillna(0)\nnegative = data['weights'][data['weights'] < 0].fillna(0)\n\naxs[1].set_title('Weights evolution')\naxs[1].stackplot(data['weights'].index, positive.T)\naxs[1].stackplot(data['weights'].index, negative.T)\naxs[1].set(xlabel=r'days ($t$)', ylabel=r'data weights')\n\nwith pd.ExcelWriter('{}/portfolio.xlsx'.format(base_path)) as writer:\n data['SpotPrice'].to_excel(writer, sheet_name='SpotPrices', index=True)\n data['lnReturns'].to_excel(writer, sheet_name='lnReturns', index=True)\n data['signals'].to_excel(writer, sheet_name='Signals', index=True)\n data['weights'].to_excel(writer, sheet_name='Weights', index=True)\n data['strategy'].to_excel(writer, sheet_name='Strategy', index=True)\n data['strategy_net'].to_excel(writer, sheet_name='Strategy', index=True)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.subplots", "numpy.mean", "numpy.nanmin", "numpy.abs", "matplotlib.pyplot.show", "numpy.nanmax" ], [ "pandas.to_datetime", "numpy.log", "matplotlib.pyplot.subplots", "pandas.concat", "matplotlib.pyplot.show" ] ]
ahhuisg/ML-Data-Prep-Zoo
[ "195733b5767d69c9992456f1380e6c646e30a5ae" ]
[ "clean/datazoo/base.py" ]
[ "import pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom logzero import logger\r\n\r\n\r\nclass ZooBase(object):\r\n def __init__(self):\r\n self.vectorizer = CountVectorizer(ngram_range=(2, 2), analyzer='char')\r\n\r\n def _process_stats(self, data):\r\n data1 = data[['total_vals', 'num_nans', '%_nans', 'num_of_dist_val', '%_dist_val', 'mean', 'std_dev', 'min_val',\r\n 'max_val', 'has_delimiters', 'has_url', 'has_email', 'has_date', 'mean_word_count',\r\n 'std_dev_word_count', 'mean_stopword_total', 'stdev_stopword_total',\r\n 'mean_char_count', 'stdev_char_count', 'mean_whitespace_count',\r\n 'stdev_whitespace_count', 'mean_delim_count', 'stdev_delim_count',\r\n 'is_list', 'is_long_sentence']]\r\n data1 = data1.reset_index(drop=True)\r\n data1 = data1.fillna(0)\r\n\r\n return data1\r\n\r\n def _feature_extraction(self, data, data1):\r\n arr = data['Attribute_name'].values\r\n arr = [str(x) for x in arr]\r\n\r\n if self._is_vectorizer_fitted():\r\n logger.info('vectorizer already fitted. Doing transform')\r\n X = self.vectorizer.transform(arr)\r\n else:\r\n logger.info('vectorizer not fitted. Doing fit and transform')\r\n X = self.vectorizer.fit_transform(arr)\r\n\r\n attr_df = pd.DataFrame(X.toarray())\r\n\r\n data2 = pd.concat([data1, attr_df], axis=1, sort=False)\r\n data2 = data2.fillna(0)\r\n\r\n logger.info(f'total length of from feature extraction: {len(data2)}')\r\n\r\n return data2\r\n\r\n def _is_vectorizer_fitted(self):\r\n try:\r\n self.vectorizer.get_feature_names_out()\r\n except:\r\n logger.warn('vectorizer not fitted yet')\r\n return False\r\n\r\n return True\r\n" ]
[ [ "sklearn.feature_extraction.text.CountVectorizer", "pandas.concat" ] ]
sdaulton/Ax-1
[ "8815896c0ff094871a76b73e5adbe897a0df5bf1" ]
[ "ax/models/torch/botorch.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom copy import deepcopy\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, cast\n\nimport numpy as np\nimport torch\nfrom ax.core.types import TCandidateMetadata, TConfig, TGenMetadata\nfrom ax.models.torch.botorch_defaults import (\n get_and_fit_model,\n get_NEI,\n recommend_best_observed_point,\n scipy_optimizer,\n)\nfrom ax.models.torch.utils import (\n _get_X_pending_and_observed,\n _to_inequality_constraints,\n normalize_indices,\n predict_from_model,\n subset_model,\n)\nfrom ax.models.torch_base import TorchModel\nfrom ax.utils.common.constants import Keys\nfrom ax.utils.common.docutils import copy_doc\nfrom ax.utils.common.logger import get_logger\nfrom ax.utils.common.typeutils import checked_cast\nfrom botorch.acquisition.acquisition import AcquisitionFunction\nfrom botorch.models.model import Model\nfrom torch import Tensor\n\n\nlogger = get_logger(__name__)\n\n\nTModelConstructor = Callable[\n [\n List[Tensor],\n List[Tensor],\n List[Tensor],\n List[int],\n List[int],\n List[str],\n Optional[Dict[str, Tensor]],\n Any,\n ],\n Model,\n]\nTModelPredictor = Callable[[Model, Tensor], Tuple[Tensor, Tensor]]\nTAcqfConstructor = Callable[\n [\n Model,\n Tensor,\n Optional[Tuple[Tensor, Tensor]],\n Optional[Tensor],\n Optional[Tensor],\n Any,\n ],\n AcquisitionFunction,\n]\nTOptimizer = Callable[\n [\n AcquisitionFunction,\n Tensor,\n int,\n Optional[List[Tuple[Tensor, Tensor, float]]],\n Optional[Dict[int, float]],\n Optional[Callable[[Tensor], Tensor]],\n Any,\n ],\n Tuple[Tensor, Tensor],\n]\nTBestPointRecommender = Callable[\n [\n TorchModel,\n List[Tuple[float, float]],\n Tensor,\n Optional[Tuple[Tensor, Tensor]],\n Optional[Tuple[Tensor, Tensor]],\n Optional[Dict[int, float]],\n Optional[TConfig],\n Optional[Dict[int, float]],\n ],\n Optional[Tensor],\n]\n\n\nclass BotorchModel(TorchModel):\n r\"\"\"\n Customizable botorch model.\n\n By default, this uses a noisy Expected Improvement acquisition function on\n top of a model made up of separate GPs, one for each outcome. 
This behavior\n can be modified by providing custom implementations of the following\n components:\n\n - a `model_constructor` that instantiates and fits a model on data\n - a `model_predictor` that predicts outcomes using the fitted model\n - a `acqf_constructor` that creates an acquisition function from a fitted model\n - a `acqf_optimizer` that optimizes the acquisition function\n - a `best_point_recommender` that recommends a current \"best\" point (i.e.,\n what the model recommends if the learning process ended now)\n\n Args:\n model_constructor: A callable that instantiates and fits a model on data,\n with signature as described below.\n model_predictor: A callable that predicts using the fitted model, with\n signature as described below.\n acqf_constructor: A callable that creates an acquisition function from a\n fitted model, with signature as described below.\n acqf_optimizer: A callable that optimizes the acquisition function, with\n signature as described below.\n best_point_recommender: A callable that recommends the best point, with\n signature as described below.\n refit_on_cv: If True, refit the model for each fold when performing\n cross-validation.\n refit_on_update: If True, refit the model after updating the training\n data using the `update` method.\n warm_start_refitting: If True, start model refitting from previous\n model parameters in order to speed up the fitting process.\n\n\n Call signatures:\n\n ::\n\n model_constructor(\n Xs,\n Ys,\n Yvars,\n task_features,\n fidelity_features,\n metric_names,\n state_dict,\n **kwargs,\n ) -> model\n\n Here `Xs`, `Ys`, `Yvars` are lists of tensors (one element per outcome),\n `task_features` identifies columns of Xs that should be modeled as a task,\n `fidelity_features` is a list of ints that specify the positions of fidelity\n parameters in 'Xs', `metric_names` provides the names of each `Y` in `Ys`,\n `state_dict` is a pytorch module state dict, and `model` is a BoTorch `Model`.\n Optional kwargs are being passed through from the `BotorchModel` constructor.\n This callable is assumed to return a fitted BoTorch model that has the same\n dtype and lives on the same device as the input tensors.\n\n ::\n\n model_predictor(model, X) -> [mean, cov]\n\n Here `model` is a fitted botorch model, `X` is a tensor of candidate points,\n and `mean` and `cov` are the posterior mean and covariance, respectively.\n\n ::\n\n acqf_constructor(\n model,\n objective_weights,\n outcome_constraints,\n X_observed,\n X_pending,\n **kwargs,\n ) -> acq_function\n\n\n Here `model` is a botorch `Model`, `objective_weights` is a tensor of weights\n for the model outputs, `outcome_constraints` is a tuple of tensors describing\n the (linear) outcome constraints, `X_observed` are previously observed points,\n and `X_pending` are points whose evaluation is pending. `acq_function` is a\n BoTorch acquisition function crafted from these inputs. For additional\n details on the arguments, see `get_NEI`.\n\n ::\n\n acqf_optimizer(\n acq_function,\n bounds,\n n,\n inequality_constraints,\n fixed_features,\n rounding_func,\n **kwargs,\n ) -> candidates\n\n Here `acq_function` is a BoTorch `AcquisitionFunction`, `bounds` is a tensor\n containing bounds on the parameters, `n` is the number of candidates to be\n generated, `inequality_constraints` are inequality constraints on parameter\n values, `fixed_features` specifies features that should be fixed during\n generation, and `rounding_func` is a callback that rounds an optimization\n result appropriately. 
`candidates` is a tensor of generated candidates.\n For additional details on the arguments, see `scipy_optimizer`.\n\n ::\n\n best_point_recommender(\n model,\n bounds,\n objective_weights,\n outcome_constraints,\n linear_constraints,\n fixed_features,\n model_gen_options,\n target_fidelities,\n ) -> candidates\n\n Here `model` is a TorchModel, `bounds` is a list of tuples containing bounds\n on the parameters, `objective_weights` is a tensor of weights for the model outputs,\n `outcome_constraints` is a tuple of tensors describing the (linear) outcome\n constraints, `linear_constraints` is a tuple of tensors describing constraints\n on the design, `fixed_features` specifies features that should be fixed during\n generation, `model_gen_options` is a config dictionary that can contain\n model-specific options, and `target_fidelities` is a map from fidelity feature\n column indices to their respective target fidelities, used for multi-fidelity\n optimization problems. % TODO: refer to an example.\n \"\"\"\n\n dtype: Optional[torch.dtype]\n device: Optional[torch.device]\n Xs: List[Tensor]\n Ys: List[Tensor]\n Yvars: List[Tensor]\n\n def __init__(\n self,\n model_constructor: TModelConstructor = get_and_fit_model,\n model_predictor: TModelPredictor = predict_from_model,\n # pyre-fixme[9]: acqf_constructor has type `Callable[[Model, Tensor,\n # Optional[Tuple[Tensor, Tensor]], Optional[Tensor], Optional[Tensor], Any],\n # AcquisitionFunction]`; used as `Callable[[Model, Tensor,\n # Optional[Tuple[Tensor, Tensor]], Optional[Tensor], Optional[Tensor],\n # **(Any)], AcquisitionFunction]`.\n acqf_constructor: TAcqfConstructor = get_NEI,\n # pyre-fixme[9]: acqf_optimizer declared/used type mismatch\n acqf_optimizer: TOptimizer = scipy_optimizer,\n best_point_recommender: TBestPointRecommender = recommend_best_observed_point,\n refit_on_cv: bool = False,\n refit_on_update: bool = True,\n warm_start_refitting: bool = True,\n **kwargs: Any,\n ) -> None:\n self.model_constructor = model_constructor\n self.model_predictor = model_predictor\n self.acqf_constructor = acqf_constructor\n self.acqf_optimizer = acqf_optimizer\n self.best_point_recommender = best_point_recommender\n self._kwargs = kwargs\n self.refit_on_cv = refit_on_cv\n self.refit_on_update = refit_on_update\n self.warm_start_refitting = warm_start_refitting\n self.model: Optional[Model] = None\n self.Xs = []\n self.Ys = []\n self.Yvars = []\n self.dtype = None\n self.device = None\n self.task_features: List[int] = []\n self.fidelity_features: List[int] = []\n self.metric_names: List[str] = []\n\n @copy_doc(TorchModel.fit)\n def fit(\n self,\n Xs: List[Tensor],\n Ys: List[Tensor],\n Yvars: List[Tensor],\n bounds: List[Tuple[float, float]],\n task_features: List[int],\n feature_names: List[str],\n metric_names: List[str],\n fidelity_features: List[int],\n candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,\n ) -> None:\n self.dtype = Xs[0].dtype\n self.device = Xs[0].device\n self.Xs = Xs\n self.Ys = Ys\n self.Yvars = Yvars\n # ensure indices are non-negative\n self.task_features = normalize_indices(task_features, d=Xs[0].size(-1))\n self.fidelity_features = normalize_indices(fidelity_features, d=Xs[0].size(-1))\n self.metric_names = metric_names\n self.model = self.model_constructor( # pyre-ignore [28]\n Xs=Xs,\n Ys=Ys,\n Yvars=Yvars,\n task_features=self.task_features,\n fidelity_features=self.fidelity_features,\n metric_names=self.metric_names,\n **self._kwargs,\n )\n\n @copy_doc(TorchModel.predict)\n def predict(self, X: 
Tensor) -> Tuple[Tensor, Tensor]:\n return self.model_predictor(model=self.model, X=X) # pyre-ignore [28]\n\n @copy_doc(TorchModel.gen)\n def gen(\n self,\n n: int,\n bounds: List[Tuple[float, float]],\n objective_weights: Tensor,\n outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n fixed_features: Optional[Dict[int, float]] = None,\n pending_observations: Optional[List[Tensor]] = None,\n model_gen_options: Optional[TConfig] = None,\n rounding_func: Optional[Callable[[Tensor], Tensor]] = None,\n target_fidelities: Optional[Dict[int, float]] = None,\n ) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:\n options = model_gen_options or {}\n acf_options = options.get(Keys.ACQF_KWARGS, {})\n optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})\n\n if target_fidelities:\n raise NotImplementedError(\n \"target_fidelities not implemented for base BotorchModel\"\n )\n\n X_pending, X_observed = _get_X_pending_and_observed(\n Xs=self.Xs,\n pending_observations=pending_observations,\n objective_weights=objective_weights,\n outcome_constraints=outcome_constraints,\n bounds=bounds,\n linear_constraints=linear_constraints,\n fixed_features=fixed_features,\n )\n\n model = self.model\n\n # subset model only to the outcomes we need for the optimization\t357\n if options.get(Keys.SUBSET_MODEL, True):\n model, objective_weights, outcome_constraints, _ = subset_model(\n model=model, # pyre-ignore [6]\n objective_weights=objective_weights,\n outcome_constraints=outcome_constraints,\n )\n\n bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)\n bounds_ = bounds_.transpose(0, 1)\n\n botorch_rounding_func = get_rounding_func(rounding_func)\n acquisition_function = self.acqf_constructor( # pyre-ignore: [28]\n model=model,\n objective_weights=objective_weights,\n outcome_constraints=outcome_constraints,\n X_observed=X_observed,\n X_pending=X_pending,\n **acf_options,\n )\n acquisition_function = checked_cast(AcquisitionFunction, acquisition_function)\n # pyre-ignore: [28]\n candidates, expected_acquisition_value = self.acqf_optimizer(\n acq_function=checked_cast(AcquisitionFunction, acquisition_function),\n bounds=bounds_,\n n=n,\n inequality_constraints=_to_inequality_constraints(\n linear_constraints=linear_constraints\n ),\n fixed_features=fixed_features,\n rounding_func=botorch_rounding_func,\n **optimizer_options,\n )\n return (\n candidates.detach().cpu(),\n torch.ones(n, dtype=self.dtype),\n {\"expected_acquisition_value\": expected_acquisition_value.tolist()},\n None,\n )\n\n @copy_doc(TorchModel.best_point)\n def best_point(\n self,\n bounds: List[Tuple[float, float]],\n objective_weights: Tensor,\n outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n fixed_features: Optional[Dict[int, float]] = None,\n model_gen_options: Optional[TConfig] = None,\n target_fidelities: Optional[Dict[int, float]] = None,\n ) -> Optional[Tensor]:\n\n return self.best_point_recommender( # pyre-ignore [28]\n model=self,\n bounds=bounds,\n objective_weights=objective_weights,\n outcome_constraints=outcome_constraints,\n linear_constraints=linear_constraints,\n fixed_features=fixed_features,\n model_gen_options=model_gen_options,\n target_fidelities=target_fidelities,\n )\n\n @copy_doc(TorchModel.cross_validate)\n def cross_validate(\n self,\n Xs_train: List[Tensor],\n Ys_train: List[Tensor],\n Yvars_train: List[Tensor],\n X_test: 
Tensor,\n ) -> Tuple[Tensor, Tensor]:\n if self.model is None:\n raise RuntimeError(\"Cannot cross-validate model that has not been fitted\")\n if self.refit_on_cv:\n state_dict = None\n else:\n state_dict = deepcopy(self.model.state_dict()) # pyre-ignore: [16]\n model = self.model_constructor( # pyre-ignore: [28]\n Xs=Xs_train,\n Ys=Ys_train,\n Yvars=Yvars_train,\n task_features=self.task_features,\n state_dict=state_dict,\n fidelity_features=self.fidelity_features,\n metric_names=self.metric_names,\n **self._kwargs,\n )\n return self.model_predictor(model=model, X=X_test) # pyre-ignore: [28]\n\n @copy_doc(TorchModel.update)\n def update(\n self,\n Xs: List[Tensor],\n Ys: List[Tensor],\n Yvars: List[Tensor],\n candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,\n ) -> None:\n if self.model is None:\n raise RuntimeError(\"Cannot update model that has not been fitted\")\n self.Xs = Xs\n self.Ys = Ys\n self.Yvars = Yvars\n if self.refit_on_update and not self.warm_start_refitting:\n state_dict = None # pragma: no cover\n else:\n state_dict = deepcopy(self.model.state_dict()) # pyre-ignore: [16]\n self.model = self.model_constructor( # pyre-ignore: [28]\n Xs=self.Xs,\n Ys=self.Ys,\n Yvars=self.Yvars,\n task_features=self.task_features,\n state_dict=state_dict,\n fidelity_features=self.fidelity_features,\n metric_names=self.metric_names,\n refit_model=self.refit_on_update,\n **self._kwargs,\n )\n\n def feature_importances(self) -> np.ndarray:\n if self.model is None:\n raise RuntimeError(\n \"Cannot calculate feature_importances without a fitted model\"\n )\n else:\n ls = self.model.covar_module.base_kernel.lengthscale # pyre-ignore: [16]\n return cast(Tensor, (1 / ls)).detach().cpu().numpy()\n\n\ndef get_rounding_func(\n rounding_func: Optional[Callable[[Tensor], Tensor]]\n) -> Optional[Callable[[Tensor], Tensor]]:\n if rounding_func is None:\n botorch_rounding_func = rounding_func\n else:\n # make sure rounding_func is properly applied to q- and t-batches\n def botorch_rounding_func(X: Tensor) -> Tensor:\n batch_shape, d = X.shape[:-1], X.shape[-1]\n X_round = torch.stack(\n [rounding_func(x) for x in X.view(-1, d)] # pyre-ignore: [16]\n )\n return X_round.view(*batch_shape, d)\n\n return botorch_rounding_func\n" ]
[ [ "torch.tensor", "torch.ones" ] ]
YiZhiXiaoGuLI/Differential-Evolution-Algorithm
[ "9505c31963c499f878daaa6d02c44d2320b32ff4" ]
[ "DEIndividual/DEIndividual.py" ]
[ "import numpy as np\r\nimport DE.ObjFunction as ObjFunction\r\n\r\n\r\nclass DEIndividual:\r\n\r\n '''\r\n individual of differential evolution algorithm\r\n '''\r\n\r\n def __init__(self, vardim, bound):\r\n '''\r\n vardim: dimension of variables\r\n bound: boundaries of variables\r\n '''\r\n self.vardim = vardim\r\n self.bound = bound\r\n self.fitness = 0.\r\n\r\n def generate(self):\r\n '''\r\n generate a random chromsome for differential evolution algorithm\r\n '''\r\n len = self.vardim\r\n rnd = np.random.random(size=len)\r\n self.chrom = np.zeros(len)\r\n for i in range(0, len):\r\n self.chrom[i] = self.bound[0, i] + \\\r\n (self.bound[1, i] - self.bound[0, i]) * rnd[i]\r\n\r\n def calculateFitness(self):\r\n '''\r\n calculate the fitness of the chromsome\r\n '''\r\n self.fitness = ObjFunction.GrieFunc(\r\n self.vardim, self.chrom, self.bound)" ]
[ [ "numpy.random.random", "numpy.zeros" ] ]
vsoch/NETransliteration-COLING2018
[ "5d5f59e561ecea45a6d3602121e1049baa7a76c3" ]
[ "xlit_s2s_nmt/utils/iterator_utils.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"For loading data into NMT models.\"\"\"\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf\n\n__all__ = [\"BatchedInput\", \"get_iterator\", \"get_infer_iterator\"]\n\n\n# NOTE(ebrevdo): When we subclass this, instances' __dict__ becomes empty.\nclass BatchedInput(\n collections.namedtuple(\"BatchedInput\",\n (\"initializer\", \"source\", \"target_input\",\n \"target_output\", \"source_sequence_length\",\n \"target_sequence_length\"))):\n pass\n\n\ndef get_infer_iterator(src_dataset,\n src_vocab_table,\n batch_size,\n source_reverse,\n eos,\n src_max_len=None):\n src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)\n src_dataset = src_dataset.map(lambda src: tf.string_split([src]).values)\n\n if src_max_len:\n src_dataset = src_dataset.map(lambda src: src[:src_max_len])\n # Convert the word strings to ids\n src_dataset = src_dataset.map(\n lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))\n if source_reverse:\n src_dataset = src_dataset.map(lambda src: tf.reverse(src, axis=[0]))\n # Add in the word counts.\n src_dataset = src_dataset.map(lambda src: (src, tf.size(src)))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The entry is the source line rows;\n # this has unknown-length vectors. 
The last entry is\n # the source row size; this is a scalar.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([])), # src_len\n # Pad the source sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n src_eos_id, # src\n 0)) # src_len -- unused\n\n batched_dataset = batching_func(src_dataset)\n batched_iter = batched_dataset.make_initializable_iterator()\n (src_ids, src_seq_len) = batched_iter.get_next()\n return BatchedInput(\n initializer=batched_iter.initializer,\n source=src_ids,\n target_input=None,\n target_output=None,\n source_sequence_length=src_seq_len,\n target_sequence_length=None)\n\n\ndef get_iterator(src_dataset,\n tgt_dataset,\n src_vocab_table,\n tgt_vocab_table,\n batch_size,\n sos,\n eos,\n source_reverse,\n random_seed,\n num_buckets,\n src_max_len=None,\n tgt_max_len=None,\n num_threads=4,\n output_buffer_size=None,\n skip_count=None,\n num_shards=1,\n shard_index=0):\n if not output_buffer_size:\n output_buffer_size = batch_size * 1000\n src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)\n tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)\n tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)\n\n src_tgt_dataset = tf.contrib.data.Dataset.zip((src_dataset, tgt_dataset))\n\n src_tgt_dataset = src_tgt_dataset.shard(num_shards, shard_index)\n if skip_count is not None:\n src_tgt_dataset = src_tgt_dataset.skip(skip_count)\n\n src_tgt_dataset = src_tgt_dataset.shuffle(output_buffer_size, random_seed)\n\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (tf.string_split([src]).values, tf.string_split([tgt]).values),\n num_threads=num_threads,\n output_buffer_size=output_buffer_size)\n\n # Filter zero length input sequences.\n src_tgt_dataset = src_tgt_dataset.filter(\n lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))\n\n if src_max_len:\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (src[:src_max_len], tgt),\n num_threads=num_threads,\n output_buffer_size=output_buffer_size)\n if tgt_max_len:\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (src, tgt[:tgt_max_len]),\n num_threads=num_threads,\n output_buffer_size=output_buffer_size)\n if source_reverse:\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (tf.reverse(src, axis=[0]), tgt),\n num_threads=num_threads,\n output_buffer_size=output_buffer_size)\n # Convert the word strings to ids. 
Word strings that are not in the\n # vocab get the lookup table's default_value integer.\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (tf.cast(src_vocab_table.lookup(src), tf.int32),\n tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)),\n num_threads=num_threads, output_buffer_size=output_buffer_size)\n # Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (src,\n tf.concat(([tgt_sos_id], tgt), 0),\n tf.concat((tgt, [tgt_eos_id]), 0)),\n num_threads=num_threads, output_buffer_size=output_buffer_size)\n # Add in sequence lengths.\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in)),\n num_threads=num_threads,\n output_buffer_size=output_buffer_size)\n\n # Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The first three entries are the source and target line rows;\n # these have unknown-length vectors. The last two entries are\n # the source and target row sizes; these are scalars.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([None]), # tgt_input\n tf.TensorShape([None]), # tgt_output\n tf.TensorShape([]), # src_len\n tf.TensorShape([])), # tgt_len\n # Pad the source and target sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n src_eos_id, # src\n tgt_eos_id, # tgt_input\n tgt_eos_id, # tgt_output\n 0, # src_len -- unused\n 0)) # tgt_len -- unused\n\n if num_buckets > 1:\n\n def key_func(unused_1, unused_2, unused_3, src_len, tgt_len):\n # Calculate bucket_width by maximum source sequence length.\n # Pairs with length [0, bucket_width) go to bucket 0, length\n # [bucket_width, 2 * bucket_width) go to bucket 1, etc. Pairs with length\n # over ((num_bucket-1) * bucket_width) words all go into the last bucket.\n if src_max_len:\n bucket_width = (src_max_len + num_buckets - 1) // num_buckets\n else:\n bucket_width = 10\n\n # Bucket sentence pairs by the length of their source sentence and target\n # sentence.\n bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)\n return tf.to_int64(tf.minimum(num_buckets, bucket_id))\n\n def reduce_func(unused_key, windowed_data):\n return batching_func(windowed_data)\n\n batched_dataset = src_tgt_dataset.apply(\n tf.contrib.data.group_by_window(\n key_func=key_func, reduce_func=reduce_func, window_size=batch_size))\n \n else:\n batched_dataset = batching_func(src_tgt_dataset)\n batched_iter = batched_dataset.make_initializable_iterator()\n (src_ids, tgt_input_ids, tgt_output_ids, src_seq_len,\n tgt_seq_len) = (batched_iter.get_next())\n return BatchedInput(\n initializer=batched_iter.initializer,\n source=src_ids,\n target_input=tgt_input_ids,\n target_output=tgt_output_ids,\n source_sequence_length=src_seq_len,\n target_sequence_length=tgt_seq_len)\n" ]
[ [ "tensorflow.size", "tensorflow.minimum", "tensorflow.concat", "tensorflow.contrib.data.group_by_window", "tensorflow.TensorShape", "tensorflow.reverse", "tensorflow.constant", "tensorflow.contrib.data.Dataset.zip", "tensorflow.maximum", "tensorflow.string_split" ] ]
wychmod/chatrobot
[ "241b5ac09f2b5084a2a66c4aa95aba5752b32e43" ]
[ "chatbot/word_sequence.py" ]
[ "import numpy as np\n\nclass WordSequence(object):\n\n PAD_TAG = '<pad>'\n UNK_TAG = '<unk>'\n START_TAG = '<s>'\n END_TAG = '</s>'\n\n PAD = 0\n UNK = 1\n START = 2\n END = 3\n\n def __init__(self):\n #初始化基本的字典dict\n self.dict = {\n WordSequence.PAD_TAG: WordSequence.PAD,\n WordSequence.UNK_TAG: WordSequence.UNK,\n WordSequence.START_TAG: WordSequence.START,\n WordSequence.END_TAG: WordSequence.END,\n }\n self.fited = False\n\n def to_index(self, word):\n assert self.fited, \"WordSequence 尚未进行 fit 操作\"\n if word in self.dict:\n return self.dict[word]\n return WordSequence.UNK\n\n def to_word(self, index):\n assert self.fited, \"WordSequence 尚未进行 fit 操作\"\n for k, v in self.dict.items():\n if v == index:\n return k\n return WordSequence.UNK_TAG\n\n def size(self):\n\n assert self.fited, \"WordSequence 尚未进行 fit 操作\"\n return len(self.dict) + 1\n\n def __len__(self):\n return self.size()\n\n def fit(self, sentences, min_count=5, max_count=None, max_features=None):\n\n assert not self.fited, 'WordSequence 只能fit一次'\n count = {}\n for sentence in sentences:\n arr = list(sentence)\n for a in arr:\n if a not in count:\n count[a] = 0\n count[a] += 1\n\n if min_count is not None:\n count = {k: v for k, v in count.items() if v >= min_count}\n\n if max_count is not None:\n count = {k: v for k, v in count.items() if v <= max_count}\n\n self.dict = {\n WordSequence.PAD_TAG: WordSequence.PAD,\n WordSequence.UNK_TAG: WordSequence.UNK,\n WordSequence.START_TAG: WordSequence.START,\n WordSequence.END_TAG: WordSequence.END,\n }\n\n if isinstance(max_features, int):\n count = sorted(list(count.items()), key=lambda x:x[1])\n if max_features is not None and len(count) > max_features:\n count = count[-int(max_features):]\n for w, _ in count:\n self.dict[w] = len(self.dict)\n else:\n for w in sorted(count.keys()):\n self.dict[w] = len(self.dict)\n\n self.fited = True\n\n def transform(self, sentence, max_len=None):\n assert self.fited, \"WordSequence 尚未进行 fit 操作\"\n\n if max_len is not None:\n r = [self.PAD] * max_len\n else:\n r = [self.PAD] * len(sentence)\n\n for index, a in enumerate(sentence):\n if max_len is not None and index >= len(r):\n break\n r[index] = self.to_index(a)\n\n return np.array(r)\n\n def inverse_transform(self, indices,\n ignore_pad=False, ignore_unk=False,\n ignore_start=False, igonre_end=False):\n ret = []\n for i in indices:\n word = self.to_word(i)\n if word == WordSequence.PAD_TAG and ignore_pad:\n continue\n if word == WordSequence.UNK_TAG and ignore_unk:\n continue\n if word == WordSequence.START_TAG and ignore_start:\n continue\n if word == WordSequence.END_TAG and igonre_end:\n continue\n ret.append(word)\n\n return ret\n\ndef test():\n\n ws = WordSequence()\n ws.fit([\n ['你', '好', '啊'],\n ['你', '好', '哦'],\n ])\n\n indice = ws.transform(['我', '们', '好'])\n print(indice)\n\n back = ws.inverse_transform(indice)\n print(back)\n\nif __name__ == '__main__':\n test()" ]
[ [ "numpy.array" ] ]
cheerfulwang/python-tutorial
[ "d0f7348e1da4ff954e3add66e1aae55d599283ee", "d0f7348e1da4ff954e3add66e1aae55d599283ee" ]
[ "20pytorch/pos_tag_allennlp.py", "37confident-learning/03cleanlib_retrain_model_isri_demo.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing([email protected])\n@description: \n\"\"\"\n\nfrom typing import Iterator, List, Dict\nimport torch\nimport torch.optim as optim\nimport numpy as np\nfrom allennlp.data import Instance\nfrom allennlp.data.fields import TextField, SequenceLabelField\nfrom allennlp.data.dataset_readers import DatasetReader\nfrom allennlp.common.file_utils import cached_path\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\nfrom allennlp.data.tokenizers import Token\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.models import Model\nfrom allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.modules.seq2seq_encoders import Seq2SeqEncoder, PytorchSeq2SeqWrapper\nfrom allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\nfrom allennlp.training.metrics import CategoricalAccuracy\nfrom allennlp.data.iterators import BucketIterator\nfrom allennlp.training.trainer import Trainer\nfrom allennlp.predictors import SentenceTaggerPredictor\n\ntorch.manual_seed(1)\n\n\nclass PosDatasetReader(DatasetReader):\n \"\"\"\n DatasetReader for PoS tagging data, one sentence per line, like\n\n The###DET dog###NN ate###V the###DET apple###NN\n \"\"\"\n\n def __init__(self, token_indexers: Dict[str, TokenIndexer] = None) -> None:\n super().__init__(lazy=False)\n self.token_indexers = token_indexers or {\"tokens\": SingleIdTokenIndexer()}\n\n def text_to_instance(self, tokens: List[Token], tags: List[str] = None) -> Instance:\n sentence_field = TextField(tokens, self.token_indexers)\n fields = {\"sentence\": sentence_field}\n\n if tags:\n label_field = SequenceLabelField(labels=tags, sequence_field=sentence_field)\n fields[\"labels\"] = label_field\n\n return Instance(fields)\n\n def _read(self, file_path: str) -> Iterator[Instance]:\n with open(file_path) as f:\n for line in f:\n pairs = line.strip().split()\n sentence, tags = zip(*(pair.split(\"###\") for pair in pairs))\n yield self.text_to_instance([Token(word) for word in sentence], tags)\n\n\nclass LstmTagger(Model):\n def __init__(self,\n word_embeddings: TextFieldEmbedder,\n encoder: Seq2SeqEncoder,\n vocab: Vocabulary) -> None:\n super().__init__(vocab)\n self.word_embeddings = word_embeddings\n self.encoder = encoder\n self.hidden2tag = torch.nn.Linear(in_features=encoder.get_output_dim(),\n out_features=vocab.get_vocab_size('labels'))\n self.accuracy = CategoricalAccuracy()\n\n def forward(self,\n sentence: Dict[str, torch.Tensor],\n labels: torch.Tensor = None) -> torch.Tensor:\n mask = get_text_field_mask(sentence)\n embeddings = self.word_embeddings(sentence)\n encoder_out = self.encoder(embeddings, mask)\n tag_logits = self.hidden2tag(encoder_out)\n output = {\"tag_logits\": tag_logits}\n if labels is not None:\n self.accuracy(tag_logits, labels, mask)\n output[\"loss\"] = sequence_cross_entropy_with_logits(tag_logits, labels, mask)\n\n return output\n\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n return {\"accuracy\": self.accuracy.get_metric(reset)}\n\n\nreader = PosDatasetReader()\ntrain_dataset = reader.read(cached_path(\n 'https://raw.githubusercontent.com/allenai/allennlp'\n '/master/tutorials/tagger/training.txt'))\nvalidation_dataset = reader.read(cached_path(\n 'https://raw.githubusercontent.com/allenai/allennlp'\n '/master/tutorials/tagger/validation.txt'))\nvocab = Vocabulary.from_instances(train_dataset + 
validation_dataset)\nEMBEDDING_DIM = 6\nHIDDEN_DIM = 6\ntoken_embedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),\n embedding_dim=EMBEDDING_DIM)\nword_embeddings = BasicTextFieldEmbedder({\"tokens\": token_embedding})\nlstm = PytorchSeq2SeqWrapper(torch.nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM, batch_first=True))\nmodel = LstmTagger(word_embeddings, lstm, vocab)\noptimizer = optim.SGD(model.parameters(), lr=0.1)\niterator = BucketIterator(batch_size=2, sorting_keys=[(\"sentence\", \"num_tokens\")])\niterator.index_with(vocab)\ntrainer = Trainer(model=model,\n optimizer=optimizer,\n iterator=iterator,\n train_dataset=train_dataset,\n validation_dataset=validation_dataset,\n patience=10,\n num_epochs=800)\ntrainer.train()\npredictor = SentenceTaggerPredictor(model, dataset_reader=reader)\ntag_logits = predictor.predict(\"The dog ate the apple\")['tag_logits']\ntag_ids = np.argmax(tag_logits, axis=-1)\nprint([model.vocab.get_token_from_index(i, 'labels') for i in tag_ids])\n", "# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing([email protected])\n@description: \n\"\"\"\n\n# # simplified Confident Learning Tutorial\n# *Author: Curtis G. Northcutt, [email protected]*\n#\n# In this tutorial, we show how to implement confident learning without using cleanlab (for the most part).\n# This tutorial is to confident learning what this tutorial https://pytorch.org/tutorials/beginner/examples_tensor/two_layer_net_numpy.html\n# is to deep learning.\n#\n# The actual implementations in cleanlab are complex because they support parallel processing, numerous type and input checaccuracy_scores, lots of hyper-parameter settings, lots of utilities to maaccuracy_scoree things woraccuracy_score smoothly for all types of inputs, and ancillary functions.\n#\n# I ignore all of that here and provide you a bare-bones implementation using mostly for-loops and some numpy.\n# Here we'll do two simple things:\n# 1. Compute the confident joint which fully characterizes all label noise.\n# 2. Find the indices of all label errors, ordered by liaccuracy_scoreelihood of being an error.\n#\n# ## INPUT (stuff we need beforehand):\n# 1. s - These are the noisy labels. This is an np.array of noisy labels, shape (n,1)\n# 2. psx - These are the out-of-sample holdout predicted probabilities for every example in your dataset. This is an np.array (2d) of probabilities, shape (n, m)\n#\n# ## OUTPUT (what this returns):\n# 1. confident_joint - an (m, m) np.array matrix characterizing all the label error counts for every pair of labels.\n# 2. label_errors_idx - a numpy array comprised of indices of every label error, ordered by likelihood of being a label error.\n#\n# In this tutorial we use the handwritten digits dataset as an example.\n\n# In[1]:\n\n\nfrom __future__ import print_function, absolute_import, division, with_statement\n\n# To silence convergence warnings caused by using a weak\n# logistic regression classifier on image data\nimport warnings\n\nimport cleanlab\nimport numpy as np\nfrom cleanlab.classification import LearningWithNoisyLabels\nfrom cleanlab.pruning import get_noise_indices\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nwarnings.simplefilter(\"ignore\")\nnp.random.seed(477)\n\n# In[2]:\n\n\n# STEP 0 - Get some real digits data. Add a bunch of label errors. 
Get probs.\niris = datasets.load_iris()\n# Get handwritten digits data\nX = iris.data # we only take the first two features.\ny = iris.target\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\nprint(\"X:\", X[:10])\nprint(\"y_train:\", y_train[:200])\nprint('datasets number of classes:', len(np.unique(y)))\nprint('datasets number of examples:', len(y))\nprint(len(set(y)))\n# Add lots of errors to labels\ns = np.array(y_train)\nfor i in range(10):\n # Switch to some wrong label thats a different class\n s[i] = 2\n\n# Confirm that we indeed added NUM_ERRORS label errors\nactual_label_errors = np.arange(len(y_train))[s != y_train]\nprint('\\nIndices of actual label errors:\\n', actual_label_errors)\nprint('error with y, y[:20]:', s[:20])\nprint(\"len of errors:\", len(actual_label_errors))\nactual_num_errors = len(actual_label_errors)\n# To keep the tutorial short, we use cleanlab to get the\n# out-of-sample predicted probabilities using cross-validation\n# with a very simple, non-optimized logistic regression classifier\nclf = LogisticRegression()\npsx = cleanlab.latent_estimation.estimate_cv_predicted_probabilities(\n X_train, s, clf=clf)\n\n# Now we have our noisy labels s and predicted probabilities psx.\n# That's all we need for confident learning.\n\n\n# STEP 1 - Compute confident joint\n\n# Verify inputs\ns = np.asarray(s)\npsx = np.asarray(psx)\n\nordered_label_errors = get_noise_indices(\n s=s,\n psx=psx,\n sorted_index_method='normalized_margin', # Orders label errors\n)\n\nprint('orderd_label_errors:')\n\nprint(np.array(sorted(ordered_label_errors)))\nidx_errors = ordered_label_errors\n\nlabel_errors_idx = np.array(sorted(ordered_label_errors))\nscore = sum([e in label_errors_idx for e in actual_label_errors]) / actual_num_errors\nprint('% actual errors that confident learning found: {:.0%}'.format(score))\nscore = sum([e in actual_label_errors for e in label_errors_idx]) / len(label_errors_idx)\nprint('% confident learning errors that are actual errors: {:.0%}'.format(score))\n\n# original lr f1\n\nprint('WITHOUT confident learning,', end=\" \")\n\nclf.fit(X_train, s)\npred = clf.predict(X_test)\nprint(\"dataset test f1:\", round(accuracy_score(pred, y_test), 4))\n\nprint(\"\\nNow we show improvement using cleanlab to characterize the noise\")\nprint(\"and learn on the data that is (with high confidence) labeled correctly.\")\nprint()\nprint('WITH confident learning (psx not given),', end=\" \")\nrp = LearningWithNoisyLabels(clf=clf)\nrp.fit(X_train, s)\npred = rp.predict(X_test)\nprint(\"dataset test f1:\", round(accuracy_score(pred, y_test), 4))\n\nprint('WITH confident learning (psx given),', end=\" \")\nrp.fit(X=X_train, s=s, psx=psx)\npred = rp.predict(X_test)\nprint(\"dataset test f1:\", round(accuracy_score(pred, y_test), 4))\n\nprint('WITH all label right,', end=\" \")\nclf.fit(X_train, y_train)\npred = clf.predict(X_test)\nprint(\"dataset test f1:\", round(accuracy_score(pred, y_test), 4))\n\nprint(\"-------------------\")\nrp_score = accuracy_score(y_test, rp.fit(X_train, s, psx=psx).predict(X_test))\nprint(\"Logistic regression (+rankpruning):\", rp_score)\n\nclf.fit(X_train[~idx_errors], s[~idx_errors])\npred = clf.predict(X_test)\nprint('Fit on denoised data without re-weighting:', accuracy_score(y_test, pred))\n" ]
[ [ "torch.manual_seed", "torch.nn.LSTM", "numpy.argmax" ], [ "numpy.array", "numpy.asarray", "numpy.random.seed", "sklearn.metrics.accuracy_score", "sklearn.linear_model.LogisticRegression", "numpy.unique", "sklearn.model_selection.train_test_split", "sklearn.datasets.load_iris" ] ]
liyzcj/Texygen
[ "805264ff8cc4f1d09a919293812538ac2b5624e5" ]
[ "models/testrelgan/RelganGenerator.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\nfrom models.Gan import Gen\nfrom models.mrelgan.RelganMemory import RelationalMemory\nfrom tensorflow.python.ops import tensor_array_ops, control_flow_ops\nfrom utils.ops import *\n\n\nclass Generator(Gen):\n\n # def __init__(self, temperature, vocab_size, batch_size, seq_len, gen_emb_dim,\n # mem_slots, head_size, num_heads, hidden_dim, start_token, gpre_lr, grad_clip):\n\n def __init__(self, batch_size, seq_len, vocab_size, grad_clip, gpre_lr, **kwargs):\n\n self.grad_clip = grad_clip\n self.x_real = tf.placeholder(\n tf.int32, [batch_size, seq_len], name=\"x_real\")\n \n # batch_size x seq_len x vocab_size\n self.x_real_onehot=tf.one_hot(self.x_real, vocab_size)\n assert self.x_real_onehot.get_shape().as_list() == [\n batch_size, seq_len, vocab_size]\n\n with tf.variable_scope('generator'):\n self.init_generator(batch_size=batch_size, seq_len=seq_len, vocab_size=vocab_size, **kwargs)\n self.set_pretrain_op(gpre_lr)\n\n def init_generator(\n self, temperature, vocab_size, batch_size, seq_len, gen_emb_dim, mem_slots,\n head_size, num_heads, hidden_dim, start_token):\n\n x_real = self.x_real\n start_tokens=tf.constant([start_token] * batch_size, dtype=tf.int32)\n output_size=mem_slots * head_size * num_heads\n\n # build relation memory module\n g_embeddings=tf.get_variable('g_emb', shape=[vocab_size, gen_emb_dim],\n initializer=create_linear_initializer(vocab_size))\n gen_mem=RelationalMemory(\n mem_slots=mem_slots, head_size=head_size, num_heads=num_heads)\n g_output_unit=create_output_unit(output_size, vocab_size)\n\n # initial states\n init_states=gen_mem.initial_state(batch_size)\n\n # ---------- generate tokens and approximated one-hot results (Adversarial) ---------\n gen_o=tensor_array_ops.TensorArray(\n dtype=tf.float32, size=seq_len, dynamic_size=False, infer_shape=True)\n gen_x=tensor_array_ops.TensorArray(\n dtype=tf.int32, size=seq_len, dynamic_size=False, infer_shape=True)\n gen_x_onehot_adv=tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len, dynamic_size=False,\n infer_shape=True) # generator output (relaxed of gen_x)\n\n # the generator recurrent module used for adversarial training\n def _gen_recurrence(i, x_t, h_tm1, gen_o, gen_x, gen_x_onehot_adv):\n mem_o_t, h_t=gen_mem(x_t, h_tm1) # hidden_memory_tuple\n o_t=g_output_unit(mem_o_t) # batch x vocab, logits not probs\n gumbel_t=add_gumbel(o_t)\n next_token=tf.stop_gradient(\n tf.argmax(gumbel_t, axis=1, output_type=tf.int32))\n next_token_onehot=tf.one_hot(next_token, vocab_size, 1.0, 0.0)\n\n # one-hot-like, [batch_size x vocab_size]\n x_onehot_appr=tf.nn.softmax(tf.multiply(gumbel_t, temperature))\n\n # x_tp1 = tf.matmul(x_onehot_appr, g_embeddings) # approximated embeddings, [batch_size x emb_dim]\n # embeddings, [batch_size x emb_dim]\n x_tp1=tf.nn.embedding_lookup(g_embeddings, next_token)\n\n gen_o=gen_o.write(i, tf.reduce_sum(tf.multiply(\n next_token_onehot, x_onehot_appr), 1)) # [batch_size], prob\n gen_x=gen_x.write(i, next_token) # indices, [batch_size]\n\n gen_x_onehot_adv=gen_x_onehot_adv.write(i, x_onehot_appr)\n\n return i + 1, x_tp1, h_t, gen_o, gen_x, gen_x_onehot_adv\n # build a graph for outputting sequential tokens\n _, _, _, gen_o, gen_x, gen_x_onehot_adv=control_flow_ops.while_loop(\n cond=lambda i, _1, _2, _3, _4, _5: i < seq_len,\n body=_gen_recurrence,\n loop_vars=(tf.constant(0, dtype=tf.int32), tf.nn.embedding_lookup(g_embeddings, start_tokens),\n init_states, gen_o, gen_x, gen_x_onehot_adv))\n\n # batch_size x seq_len\n 
self.gen_o=tf.transpose(gen_o.stack(), perm=[1, 0])\n # batch_size x seq_len\n self.gen_x=tf.transpose(gen_x.stack(), perm=[1, 0])\n\n self.gen_x_onehot_adv=tf.transpose(gen_x_onehot_adv.stack(), perm=[\n 1, 0, 2]) # batch_size x seq_len x vocab_size\n\n # ----------- pre-training for generator -----------------\n x_emb=tf.transpose(tf.nn.embedding_lookup(g_embeddings, x_real), perm=[\n 1, 0, 2]) # seq_len x batch_size x emb_dim\n g_predictions=tensor_array_ops.TensorArray(\n dtype=tf.float32, size=seq_len, dynamic_size=False, infer_shape=True)\n\n ta_emb_x=tensor_array_ops.TensorArray(dtype=tf.float32, size=seq_len)\n ta_emb_x=ta_emb_x.unstack(x_emb)\n\n # the generator recurrent moddule used for pre-training\n def _pretrain_recurrence(i, x_t, h_tm1, g_predictions):\n mem_o_t, h_t=gen_mem(x_t, h_tm1)\n o_t=g_output_unit(mem_o_t)\n g_predictions=g_predictions.write(\n i, tf.nn.softmax(o_t)) # batch_size x vocab_size\n x_tp1=ta_emb_x.read(i)\n return i + 1, x_tp1, h_t, g_predictions\n\n # build a graph for outputting sequential tokens\n _, _, _, g_predictions=control_flow_ops.while_loop(\n cond=lambda i, _1, _2, _3: i < seq_len,\n body=_pretrain_recurrence,\n loop_vars=(tf.constant(0, dtype=tf.int32), tf.nn.embedding_lookup(g_embeddings, start_tokens),\n init_states, g_predictions))\n\n g_predictions=tf.transpose(g_predictions.stack(),\n perm=[1, 0, 2]) # batch_size x seq_length x vocab_size\n\n # pre-training loss\n self.pretrain_loss=-tf.reduce_sum(\n tf.one_hot(tf.to_int32(tf.reshape(x_real, [-1])), vocab_size, 1.0, 0.0) * tf.log(\n tf.clip_by_value(tf.reshape(\n g_predictions, [-1, vocab_size]), 1e-20, 1.0)\n )\n ) / (seq_len * batch_size)\n\n def set_pretrain_op(self, gpre_lr):\n # pre-training op\n self.vars=tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n pretrain_opt=tf.train.AdamOptimizer(gpre_lr, beta1=0.9, beta2=0.999)\n pretrain_grad, _=tf.clip_by_global_norm(tf.gradients(\n self.pretrain_loss, self.vars), self.grad_clip) # gradient clipping\n self.pretrain_op=pretrain_opt.apply_gradients(\n zip(pretrain_grad, self.vars))\n\n def set_train_op(self, g_loss, optimizer_name, gadv_lr, global_step, nadv_steps, decay):\n\n self.loss=g_loss\n\n if decay:\n gadv_lr=tf.train.exponential_decay(\n gadv_lr, global_step=global_step, decay_steps=nadv_steps, decay_rate=0.1)\n\n if optimizer_name == \"adam\":\n g_optimizer=tf.train.AdamOptimizer(\n gadv_lr, beta1=0.9, beta2=0.999)\n elif optimizer_name == \"rmsprop\":\n g_optimizer=tf.train.RMSPropOptimizer(gadv_lr)\n else:\n raise AttributeError\n\n # gradient clipping\n grads, _=tf.clip_by_global_norm(\n tf.gradients(g_loss, self.vars), self.grad_clip)\n self.train_op=g_optimizer.apply_gradients(zip(grads, self.vars))\n\n def generate(self):\n \"\"\"gemerate fake smples\n \"\"\"\n return self.gen_x\n\n def get_nll(self, sess, x):\n pretrain_loss=sess.run(\n self.pretrain_loss, feed_dict={self.x_real: x}\n )\n return pretrain_loss\n\n def pretrain_step(self, sess, x):\n \"\"\"pretrain the generator on step\"\"\"\n _, g_loss=sess.run(\n [self.pretrain_op, self.pretrain_loss], feed_dict={self.x_real: x})\n return g_loss\n\n def train(self, sess, x, step_sig):\n _, sigs = sess.run(\n [self.train_op, step_sig], feed_dict={self.x_real: x})\n return sigs\n" ]
[ [ "tensorflow.multiply", "tensorflow.train.AdamOptimizer", "tensorflow.argmax", "tensorflow.train.RMSPropOptimizer", "tensorflow.gradients", "tensorflow.get_collection", "tensorflow.reshape", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.nn.embedding_lookup", "tensorflow.nn.softmax", "tensorflow.train.exponential_decay", "tensorflow.one_hot", "tensorflow.python.ops.tensor_array_ops.TensorArray" ] ]
slikos/espresso
[ "fc0d315c267b8754b039e4062b6463556da3bd57" ]
[ "espresso/tools/specaug_noise.py" ]
[ "# Copyright (c) slikos\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\n\nclass AddSpecNoise:\n \"\"\"Add noise spec to signal spec\"\"\"\n def __init__(self, noises_npz_path=None, noise_multiplier_range=None, noise_probability=0.5):\n \"\"\"\n Args:\n noises_npz_path (str): path to numpy dump of noises specters (T, dim)\n noise_multiplier_range (list): min and max value of magnitude multiplier\n noise_probability (float): 0..1\n \"\"\"\n self.noise_multiplier_range = noise_multiplier_range or [1., 1.]\n self.noise_probability = noise_probability\n self.noises = np.load(noises_npz_path, allow_pickle=True) if noises_npz_path is not None else None\n\n def __call__(self, spec: Tensor):\n \"\"\"Add noise spec to signal spec\n Args:\n spec (torch.Tensor): input tensor of shape `(T, dim)`\n Returns:\n noised tensor (torch.Tensor): output tensor of shape `(T, dim)`\n \"\"\"\n if self.noises is None or np.random.random() > self.noise_probability:\n return spec\n\n cloned = spec.clone()\n spec_duration = cloned.size(0)\n\n noises_start = np.random.randint(0, self.noises.shape[0] - spec_duration)\n noise = self.noises[noises_start:noises_start+spec_duration, :]\n noise = torch.from_numpy(noise).to(cloned.device)\n noise_multiplier = np.random.uniform(self.noise_multiplier_range[0],\n self.noise_multiplier_range[1])\n cloned += noise * noise_multiplier\n return cloned\n" ]
[ [ "numpy.load", "torch.from_numpy", "numpy.random.uniform", "numpy.random.randint", "numpy.random.random" ] ]
TrainingDML/pytdml
[ "b1c21533a44d931717d9398cbdc57b1ee4ef3302" ]
[ "examples/scene_classification/utils/summaries.py" ]
[ "import os\nimport torch\nfrom torchvision.utils import make_grid\nfrom tensorboardX import SummaryWriter\n\n\nclass TensorboardSummary(object):\n def __init__(self, directory):\n self.directory = directory\n\n def creater_summary(self):\n writer = SummaryWriter(log_dir=self.directory)\n return writer\n\n def visualize_image(self, writer, T1, T2, label, pred, global_step, mode):\n grid_T1 = make_grid(T1[:4].clone().cpu().data, padding=50, normalize=True)\n writer.add_image(os.path.join(mode, 'T1'), grid_T1, global_step)\n\n grid_T2 = make_grid(T2[:4].clone().cpu().data, padding=50, normalize=True)\n writer.add_image(os.path.join(mode, 'T2'), grid_T2, global_step)\n\n grid_label = label.float()[:4, :, :, :]\n grid_label = make_grid(grid_label, padding=50, normalize=True)\n writer.add_image(os.path.join(mode, 'label'), grid_label, global_step)\n\n grid_pred = torch.sigmoid(pred)\n grid_pred = grid_pred[:4, :, :, :]\n grid_pred = make_grid(grid_pred, padding=50, normalize=True)\n writer.add_image(os.path.join(mode, 'pred'), grid_pred, global_step)\n" ]
[ [ "torch.sigmoid" ] ]
dataubc/DSCI_532_Group_113_Overdose
[ "d22be4cbe925107453b7b4479fdd5250feaaed80" ]
[ "code/by_race_and_place_new.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport altair as alt\nimport vega_datasets\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\n\n### NEW IMPORT\n# See Docs here: https://dash-bootstrap-components.opensource.faculty.ai\nimport dash_bootstrap_components as dbc\n\napp = dash.Dash(__name__, assets_folder='assets', external_stylesheets=[dbc.themes.CERULEAN])\napp.config['suppress_callback_exceptions'] = True\n\nserver = app.server\napp.title = 'Dash app with pure Altair HTML'\n\ndef make_plot(race='Black',place ='Hospital'):\n # Don't forget to include imports\n\n def mds_special():\n font = \"Arial\"\n axisColor = \"#000000\"\n gridColor = \"#DEDDDD\"\n return {\n \"config\": {\n \"title\": {\n \"fontSize\": 24,\n \"font\": font,\n \"anchor\": \"start\", # equivalent of left-aligned.\n \"fontColor\": \"#000000\"\n },\n 'view': {\n \"height\": 300, \n \"width\": 400\n },\n \"axisX\": {\n \"domain\": True,\n #\"domainColor\": axisColor,\n \"gridColor\": gridColor,\n \"domainWidth\": 1,\n \"grid\": False,\n \"labelFont\": font,\n \"labelFontSize\": 12,\n \"labelAngle\": 0, \n \"tickColor\": axisColor,\n \"tickSize\": 5, # default, including it just to show you can change it\n \"titleFont\": font,\n \"titleFontSize\": 16,\n \"titlePadding\": 10, # guessing, not specified in styleguide\n \"title\": \"X Axis Title (units)\", \n },\n \"axisY\": {\n \"domain\": False,\n \"grid\": True,\n \"gridColor\": gridColor,\n \"gridWidth\": 1,\n \"labelFont\": font,\n \"labelFontSize\": 14,\n \"labelAngle\": 0, \n #\"ticks\": False, # even if you don't have a \"domain\" you need to turn these off.\n \"titleFont\": font,\n \"titleFontSize\": 16,\n \"titlePadding\": 10, # guessing, not specified in styleguide\n \"title\": \"Y Axis Title (units)\", \n # titles are by default vertical left of axis so we need to hack this \n #\"titleAngle\": 0, # horizontal\n #\"titleY\": -10, # move it up\n #\"titleX\": 18, # move it to the right so it aligns with the labels \n },\n }\n }\n\n # register the custom theme under a chosen name\n alt.themes.register('mds_special', mds_special)\n\n # enable the newly registered theme\n alt.themes.enable('mds_special')\n #alt.themes.enable('none') # to return to default\n\n\n# Need to enable this to allow work with larger datasets (https://altair-viz.github.io/user_guide/faq.html)\n# alt.data_transformers.enable('json')\n alt.data_transformers.disable_max_rows()\n\n#################################################################### READING THE DATA #########################################################################################\n drug_overdose_wrangled_m = pd.read_csv(\"../data/2012-2018_lab4_data_drug-overdose-deaths-connecticut-wrangled-melted.csv\") # FOR THE BAR CHART\n\n drug_overdose_wrangled_p = pd.read_csv(\"../data/2012-2018_lab4_data_drug-overdose-deaths-connecticut-wrangled-pivot.csv\") # FOR THE LINE CHART\n\n##################################################################### FILTERING BY race and place of death ####################################################################\n####################################### HERE is Where we are taking the inputs of the function to update the data ##############################################################\n by_race_place = drug_overdose_wrangled_m[(drug_overdose_wrangled_m['Race']==race) & (drug_overdose_wrangled_m['Location']==place)] # FOR THE BAR CHART\n by_race_place_p = 
drug_overdose_wrangled_p[(drug_overdose_wrangled_p['Race']==race) & (drug_overdose_wrangled_p['Location']==place)] # FOR THE LINE CHART\n\n # WRANGLING \n drug_overdose_mpdrug = by_race_place.groupby(['Drug']).sum().drop(columns = 'Age')\\\n .sort_values('Toxicity_test', ascending = False).reset_index()\n\n######################### BAR PLOT using the filtered data ################################ \n\n mp_drug = alt.Chart(drug_overdose_mpdrug).mark_bar(\n opacity=0.8,\n color = 'teal'\n ).encode(\n alt.Y('Drug:N', title = '', sort = alt.EncodingSortField(field = 'Toxicity_test', order = 'descending')),\n alt.X('Toxicity_test:Q', title = 'Times a drug tested positive'),\n tooltip = [alt.Tooltip('Drug', title = 'Drug'), \n alt.Tooltip('Toxicity_test', title = 'Positives')]\n ).properties(\n width = 200,\n height = 400,\n title = 'Drugs in test'\n )\n ######################### LINE PLOT filtered data ###################################### \n trend_AFTER = alt.Chart(by_race_place_p).mark_line(point = True).encode(\n alt.X('year(Date):O', title = 'Reported year of death'),\n alt.Y('count()', title = 'Count of people', scale = alt.Scale(domain=(0, 2000))),\n tooltip = [alt.Tooltip('year(Date)', title = 'Year'),\n alt.Tooltip('count()', title = 'Count of people')]\n ).properties(\n width = 200,\n height = 400,\n title = \" Trend\") \n\n return (mp_drug|trend_AFTER)\n\n\n ############################ dbc section #####################################\n\n # IMAGE ON THE TOP\njumbotron = dbc.Jumbotron(\n [\n dbc.Container(\n [\n html.Img(src='https://live.staticflickr.com/2414/2133362573_04f6bd053f_b.jpg', \n width='100px'),\n html.H1(\"Drug overdose deaths in Connecticut US\", className=\"display-6\"),\n html.P(\n \"Add a description of the dashboard\",\n className=\"lead\",\n ),\n ],\n fluid=True,\n )\n ],\n fluid=True,\n)\n\n#### THE LOGO, NOT USED we can embed an image of staic plot here ###################\nlogo = dbc.Row(dbc.Col(html.Img(src='https://upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Unico_Anello.png/1920px-Unico_Anello.png', \n width='15%'), width=4))\n\n##### THE content, the Iframe and the dropdown\ncontent = dbc.Container([\n dbc.Row(\n [dbc.Col(\n html.Iframe(\n sandbox='allow-scripts',\n id='plot',\n height='560',\n width='700',\n style={'border-width': '0'},\n ################ The magic happens here\n srcDoc=make_plot().to_html()\n ################ The magic happens here\n ),width='8'),\n \n dbc.Col( \n dcc.Dropdown(\n id='dd-chart-race',\n options=[\n {'label': 'Black', 'value': 'Black'},\n {'label': 'White', 'value': 'White'},\n {'label': 'Asian, Other', 'value': 'Asian, Other'},\n {'label': 'Hispanic, White', 'value': 'Hispanic, White'},\n {'label': 'No description', 'value': 'No description'},\n {'label': 'Asian Indian', 'value': 'Asian Indian'},\n {'label': 'Hispanic, Black', 'value': 'Hispanic, Black'},\n {'label': 'Unknown', 'value': 'Unknown'},\n {'label': 'Other', 'value': 'Other'},\n {'label': 'Chinese', 'value': 'Chinese'},\n {'label': 'Native American, Other', 'value': 'Native American, Other'},\n \n ],\n value='White'\n ), width=2),\n\n dbc.Col( \n dcc.Dropdown(\n id='dd-chart-place',\n options=[\n {'label': 'Hospital', 'value': 'Hospital'},\n {'label': 'Residence', 'value': 'Residence'},\n {'label': 'Other', 'value': 'Other'},\n {'label': 'Nursing Home', 'value': 'Nursing Home'},\n {'label': 'No description', 'value': 'No description'},\n {'label': 'Convalescent Home', 'value': 'Convalescent Home'},\n {'label': 'Hospice', 'value': 'Hospice'}\n \n ],\n 
value='Residence'\n ), width=2)\n \n ]\n )\n ]\n)\n\n\n##### THE Footer\nfooter = dbc.Container([dbc.Row(dbc.Col(html.P('UBC-MDS'))),\n ])\n\n## THE general lay out, indclude logo here from above if desired \napp.layout = html.Div([jumbotron,\n content,\n footer])\n\n## THE callback\[email protected](\n dash.dependencies.Output('plot', 'srcDoc'),\n [dash.dependencies.Input('dd-chart-race', 'value'),\n dash.dependencies.Input('dd-chart-place', 'value')])\n\n## updating the make plot function after callback get the updated values from the users\ndef update_plot(race_name,place_name):\n '''\n Takes in an xaxis_column_name and calls make_plot to update our Altair figure\n '''\n updated_plot = make_plot(race_name,place_name).to_html()\n return updated_plot\n\nif __name__ == '__main__':\n app.run_server(debug=True)" ]
[ [ "pandas.read_csv" ] ]
mansimane/notebooks
[ "acd5481dd7a041ad899bbf37da57c3a46863c985" ]
[ "sagemaker/05_spot_instances/scripts/train.py" ]
[ "from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments\nfrom transformers.trainer_utils import get_last_checkpoint\n\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\nfrom datasets import load_from_disk\nimport logging\nimport sys\nimport argparse\nimport os\n\n# Set up logging\nlogger = logging.getLogger(__name__)\n\nlogging.basicConfig(\n level=logging.getLevelName(\"INFO\"),\n handlers=[logging.StreamHandler(sys.stdout)],\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n)\n\nif __name__ == \"__main__\":\n\n logger.info(sys.argv)\n\n parser = argparse.ArgumentParser()\n\n # hyperparameters sent by the client are passed as command-line arguments to the script.\n parser.add_argument(\"--epochs\", type=int, default=3)\n parser.add_argument(\"--train-batch-size\", type=int, default=32)\n parser.add_argument(\"--eval-batch-size\", type=int, default=64)\n parser.add_argument(\"--warmup_steps\", type=int, default=500)\n parser.add_argument(\"--model_name\", type=str)\n parser.add_argument(\"--learning_rate\", type=str, default=5e-5)\n parser.add_argument(\"--output_dir\", type=str)\n\n # Data, model, and output directories\n parser.add_argument(\"--output-data-dir\", type=str, default=os.environ[\"SM_OUTPUT_DATA_DIR\"])\n parser.add_argument(\"--model-dir\", type=str, default=os.environ[\"SM_MODEL_DIR\"])\n parser.add_argument(\"--n_gpus\", type=str, default=os.environ[\"SM_NUM_GPUS\"])\n parser.add_argument(\"--training_dir\", type=str, default=os.environ[\"SM_CHANNEL_TRAIN\"])\n parser.add_argument(\"--test_dir\", type=str, default=os.environ[\"SM_CHANNEL_TEST\"])\n\n args, _ = parser.parse_known_args()\n\n # load datasets\n train_dataset = load_from_disk(args.training_dir)\n test_dataset = load_from_disk(args.test_dir)\n\n logger.info(f\" loaded train_dataset length is: {len(train_dataset)}\")\n logger.info(f\" loaded test_dataset length is: {len(test_dataset)}\")\n\n # compute metrics function for binary classification\n def compute_metrics(pred):\n labels = pred.label_ids\n preds = pred.predictions.argmax(-1)\n precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average=\"binary\")\n acc = accuracy_score(labels, preds)\n return {\"accuracy\": acc, \"f1\": f1, \"precision\": precision, \"recall\": recall}\n\n # download model from model hub\n model = AutoModelForSequenceClassification.from_pretrained(args.model_name)\n\n # define training args\n training_args = TrainingArguments(\n output_dir=args.output_dir,\n num_train_epochs=args.epochs,\n per_device_train_batch_size=args.train_batch_size,\n per_device_eval_batch_size=args.eval_batch_size,\n warmup_steps=args.warmup_steps,\n evaluation_strategy=\"epoch\",\n logging_dir=f\"{args.output_data_dir}/logs\",\n learning_rate=float(args.learning_rate),\n )\n\n # create Trainer instance\n trainer = Trainer(\n model=model,\n args=training_args,\n compute_metrics=compute_metrics,\n train_dataset=train_dataset,\n eval_dataset=test_dataset,\n )\n\n # train model\n if get_last_checkpoint(args.output_dir) is not None:\n logger.info(\"***** continue training *****\")\n last_checkpoint = get_last_checkpoint(args.output_dir)\n trainer.train(resume_from_checkpoint=last_checkpoint)\n else:\n trainer.train()\n # evaluate model\n eval_result = trainer.evaluate(eval_dataset=test_dataset)\n\n # writes eval result to file which can be accessed later in s3 ouput\n with open(os.path.join(args.output_data_dir, \"eval_results.txt\"), \"w\") as writer:\n 
print(f\"***** Eval results *****\")\n for key, value in sorted(eval_result.items()):\n writer.write(f\"{key} = {value}\\n\")\n\n # Saves the model to s3\n trainer.save_model(args.model_dir)\n" ]
[ [ "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_recall_fscore_support" ] ]
sergeyshilin/catalyst
[ "f4dfaac7bc3fe98b2a0a9cf0b4347b100750f82f" ]
[ "catalyst/data/sampler.py" ]
[ "from typing import List, Iterator\nimport numpy as np\n\nfrom torch.utils.data.sampler import Sampler\n\n\nclass BalanceClassSampler(Sampler):\n \"\"\"\n Abstraction over data sampler. Allows you to create stratified sample\n on unbalanced classes.\n \"\"\"\n def __init__(self, labels: List[int], mode: str = \"downsampling\"):\n \"\"\"\n Args:\n labels (List[int]): list of class label\n for each elem in the datasety\n mode (str): Strategy to balance classes.\n Must be one of [downsampling, upsampling]\n \"\"\"\n super().__init__(labels)\n\n labels = np.array(labels)\n samples_per_class = {\n label: (labels == label).sum()\n for label in set(labels)\n }\n\n self.lbl2idx = {\n label: np.arange(len(labels))[labels == label].tolist()\n for label in set(labels)\n }\n\n if isinstance(mode, int) or mode == \"upsampling\":\n samples_per_class = mode \\\n if isinstance(mode, int) \\\n else max(samples_per_class.values())\n else:\n samples_per_class = min(samples_per_class.values())\n\n self.labels = labels\n self.samples_per_class = samples_per_class\n self.length = self.samples_per_class * len(set(labels))\n\n def __iter__(self) -> Iterator[int]:\n \"\"\"\n Yields:\n indices of stratified sample\n \"\"\"\n indices = []\n for key in sorted(self.lbl2idx):\n replace_ = self.samples_per_class > len(self.lbl2idx[key])\n indices += np.random.choice(\n self.lbl2idx[key], self.samples_per_class, replace=replace_\n ).tolist()\n assert (len(indices) == self.length)\n np.random.shuffle(indices)\n\n return iter(indices)\n\n def __len__(self) -> int:\n \"\"\"\n Returns:\n length of result sample\n \"\"\"\n return self.length\n\n\nclass MiniEpochSampler(Sampler):\n \"\"\"\n Sampler iterates mini epochs from the dataset\n used by ``mini_epoch_len``\n\n Args:\n data_len (int): Size of the dataset\n mini_epoch_len (int): Num samples from the dataset used in one\n mini epoch.\n drop_last (bool): If ``True``, sampler will drop the last batches if\n its size would be less than ``batches_per_epoch``\n shuffle (str): one of ``[\"always\", \"real_epoch\", None]``.\n The sampler will shuffle indices\n > \"per_mini_epoch\" -- every mini epoch (every ``__iter__`` call)\n > \"per_epoch\" -- every real epoch\n > None -- don't shuffle\n\n Example:\n >>> MiniEpochSampler(len(dataset), mini_epoch_len=100)\n >>> MiniEpochSampler(len(dataset), mini_epoch_len=100,\n >>> drop_last=True)\n >>> MiniEpochSampler(len(dataset), mini_epoch_len=100,\n >>> shuffle=\"per_epoch\")\n \"\"\"\n def __init__(\n self,\n data_len: int,\n mini_epoch_len: int,\n drop_last: bool = False,\n shuffle: str = None\n ):\n super().__init__(None)\n\n self.data_len = int(data_len)\n self.mini_epoch_len = int(mini_epoch_len)\n\n self.steps = int(data_len / self.mini_epoch_len)\n self.state_i = 0\n\n has_reminder = data_len - self.steps * mini_epoch_len > 0\n if self.steps == 0:\n self.divider = 1\n elif has_reminder and not drop_last:\n self.divider = self.steps + 1\n else:\n self.divider = self.steps\n\n self._indices = np.arange(self.data_len)\n self.indices = self._indices\n self.end_pointer = max(self.data_len, self.mini_epoch_len)\n\n if not (shuffle is None or shuffle in [\"per_mini_epoch\", \"per_epoch\"]):\n raise ValueError(\n f\"Shuffle must be one of ['per_mini_epoch', 'per_epoch']. 
\"\n f\"Got {shuffle}\"\n )\n self.shuffle_type = shuffle\n\n def shuffle(self):\n if self.shuffle_type == \"per_mini_epoch\" or \\\n (self.shuffle_type == \"per_epoch\" and self.state_i == 0):\n if self.data_len >= self.mini_epoch_len:\n self.indices = self._indices\n np.random.shuffle(self.indices)\n else:\n self.indices = np.random.choice(\n self._indices, self.mini_epoch_len, replace=True\n )\n\n def __iter__(self) -> Iterator[int]:\n self.state_i = self.state_i % self.divider\n self.shuffle()\n\n start = self.state_i * self.mini_epoch_len\n stop = self.end_pointer \\\n if (self.state_i == self.steps) \\\n else (self.state_i + 1) * self.mini_epoch_len\n indices = self.indices[start:stop].tolist()\n\n self.state_i += 1\n return iter(indices)\n\n def __len__(self) -> int:\n return self.mini_epoch_len\n\n\n__all__ = [\"BalanceClassSampler\", \"MiniEpochSampler\"]\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.random.choice", "numpy.random.shuffle" ] ]
gbiomech/BMC
[ "fec9413b17a54f00ba6818438f7a50b132353e42" ]
[ "functions/detect_peaks.py" ]
[ "\"\"\"Detect peaks in data based on their amplitude and other features.\"\"\"\r\n\r\nfrom __future__ import division, print_function\r\nimport numpy as np\r\n\r\n__author__ = \"Marcos Duarte, https://github.com/demotu/BMC\"\r\n__version__ = \"1.0.4\"\r\n__license__ = \"MIT\"\r\n\r\n\r\ndef detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',\r\n kpsh=False, valley=False, show=False, ax=None):\r\n\r\n \"\"\"Detect peaks in data based on their amplitude and other features.\r\n\r\n Parameters\r\n ----------\r\n x : 1D array_like\r\n data.\r\n mph : {None, number}, optional (default = None)\r\n detect peaks that are greater than minimum peak height.\r\n mpd : positive integer, optional (default = 1)\r\n detect peaks that are at least separated by minimum peak distance (in\r\n number of data).\r\n threshold : positive number, optional (default = 0)\r\n detect peaks (valleys) that are greater (smaller) than `threshold`\r\n in relation to their immediate neighbors.\r\n edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')\r\n for a flat peak, keep only the rising edge ('rising'), only the\r\n falling edge ('falling'), both edges ('both'), or don't detect a\r\n flat peak (None).\r\n kpsh : bool, optional (default = False)\r\n keep peaks with same height even if they are closer than `mpd`.\r\n valley : bool, optional (default = False)\r\n if True (1), detect valleys (local minima) instead of peaks.\r\n show : bool, optional (default = False)\r\n if True (1), plot data in matplotlib figure.\r\n ax : a matplotlib.axes.Axes instance, optional (default = None).\r\n\r\n Returns\r\n -------\r\n ind : 1D array_like\r\n indeces of the peaks in `x`.\r\n\r\n Notes\r\n -----\r\n The detection of valleys instead of peaks is performed internally by simply\r\n negating the data: `ind_valleys = detect_peaks(-x)`\r\n \r\n The function can handle NaN's \r\n\r\n See this IPython Notebook [1]_.\r\n\r\n References\r\n ----------\r\n .. 
[1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb\r\n\r\n Examples\r\n --------\r\n >>> from detect_peaks import detect_peaks\r\n >>> x = np.random.randn(100)\r\n >>> x[60:81] = np.nan\r\n >>> # detect all peaks and plot data\r\n >>> ind = detect_peaks(x, show=True)\r\n >>> print(ind)\r\n\r\n >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5\r\n >>> # set minimum peak height = 0 and minimum peak distance = 20\r\n >>> detect_peaks(x, mph=0, mpd=20, show=True)\r\n\r\n >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]\r\n >>> # set minimum peak distance = 2\r\n >>> detect_peaks(x, mpd=2, show=True)\r\n\r\n >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5\r\n >>> # detection of valleys instead of peaks\r\n >>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True)\r\n\r\n >>> x = [0, 1, 1, 0, 1, 1, 0]\r\n >>> # detect both edges\r\n >>> detect_peaks(x, edge='both', show=True)\r\n\r\n >>> x = [-2, 1, -2, 2, 1, 1, 3, 0]\r\n >>> # set threshold = 2\r\n >>> detect_peaks(x, threshold = 2, show=True)\r\n \"\"\"\r\n\r\n x = np.atleast_1d(x).astype('float64')\r\n if x.size < 3:\r\n return np.array([], dtype=int)\r\n if valley:\r\n x = -x\r\n # find indices of all peaks\r\n dx = x[1:] - x[:-1]\r\n # handle NaN's\r\n indnan = np.where(np.isnan(x))[0]\r\n if indnan.size:\r\n x[indnan] = np.inf\r\n dx[np.where(np.isnan(dx))[0]] = np.inf\r\n ine, ire, ife = np.array([[], [], []], dtype=int)\r\n if not edge:\r\n ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]\r\n else:\r\n if edge.lower() in ['rising', 'both']:\r\n ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]\r\n if edge.lower() in ['falling', 'both']:\r\n ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]\r\n ind = np.unique(np.hstack((ine, ire, ife)))\r\n # handle NaN's\r\n if ind.size and indnan.size:\r\n # NaN's and values close to NaN's cannot be peaks\r\n ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]\r\n # first and last values of x cannot be peaks\r\n if ind.size and ind[0] == 0:\r\n ind = ind[1:]\r\n if ind.size and ind[-1] == x.size-1:\r\n ind = ind[:-1]\r\n # remove peaks < minimum peak height\r\n if ind.size and mph is not None:\r\n ind = ind[x[ind] >= mph]\r\n # remove peaks - neighbors < threshold\r\n if ind.size and threshold > 0:\r\n dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)\r\n ind = np.delete(ind, np.where(dx < threshold)[0])\r\n # detect small peaks closer than minimum peak distance\r\n if ind.size and mpd > 1:\r\n ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height\r\n idel = np.zeros(ind.size, dtype=bool)\r\n for i in range(ind.size):\r\n if not idel[i]:\r\n # keep peaks with the same height if kpsh is True\r\n idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \\\r\n & (x[ind[i]] > x[ind] if kpsh else True)\r\n idel[i] = 0 # Keep current peak\r\n # remove the small peaks and sort back the indices by their occurrence\r\n ind = np.sort(ind[~idel])\r\n\r\n if show:\r\n if indnan.size:\r\n x[indnan] = np.nan\r\n if valley:\r\n x = -x\r\n _plot(x, mph, mpd, threshold, edge, valley, ax, ind)\r\n\r\n return ind\r\n\r\n\r\ndef _plot(x, mph, mpd, threshold, edge, valley, ax, ind):\r\n \"\"\"Plot results of the detect_peaks function, see its help.\"\"\"\r\n try:\r\n import matplotlib.pyplot as plt\r\n except ImportError:\r\n print('matplotlib is not available.')\r\n else:\r\n if ax is None:\r\n _, ax = 
plt.subplots(1, 1, figsize=(8, 4))\r\n\r\n ax.plot(x, 'b', lw=1)\r\n if ind.size:\r\n label = 'valley' if valley else 'peak'\r\n label = label + 's' if ind.size > 1 else label\r\n ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,\r\n label='%d %s' % (ind.size, label))\r\n ax.legend(loc='best', framealpha=.5, numpoints=1)\r\n ax.set_xlim(-.02*x.size, x.size*1.02-1)\r\n ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()\r\n yrange = ymax - ymin if ymax > ymin else 1\r\n ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)\r\n ax.set_xlabel('Data #', fontsize=14)\r\n ax.set_ylabel('Amplitude', fontsize=14)\r\n mode = 'Valley detection' if valley else 'Peak detection'\r\n ax.set_title(\"%s (mph=%s, mpd=%d, threshold=%s, edge='%s')\"\r\n % (mode, str(mph), mpd, str(threshold), edge))\r\n # plt.grid()\r\n plt.show()\r\n" ]
[ [ "numpy.array", "numpy.isnan", "numpy.zeros", "matplotlib.pyplot.subplots", "numpy.where", "matplotlib.pyplot.show", "numpy.sort", "numpy.atleast_1d", "numpy.argsort", "numpy.isfinite", "numpy.hstack", "numpy.vstack" ] ]
wnstlr/SMERF
[ "27901688a417154de1ebad4f9bfb06686112a098" ]
[ "smerf/textbox_data.py" ]
[ "import numpy as np\nimport PIL\nfrom PIL import Image, ImageDraw, ImageFont, ImageEnhance\nfrom torch.utils.data import Dataset, DataLoader\nimport os\nfrom smerf.eval import setup_bboxes\nimport pickle\n\nDATA_DIR = '../data/'\n\nclass TextBoxDataset(Dataset):\n def __init__(self, X, y):\n self.X = X\n self.y = y\n\n def __len__(self):\n return len(self.y)\n\n def __getitem__(self, idx):\n return self.X[idx], self.y[idx]\n\n# Interpolate between blue (v = 0) and red (v = 1)\ndef shade(im, v):\n if v == -1:\n im[:, :, :] = 255 #plain white background\n elif v == -2:\n im[:,:,:] = 0 # plain black background\n elif v == -3:\n im[:,:,:] = np.asarray(np.random.random((64,64,3)) * 100, dtype=int) # random gray background\n elif v == -4:\n # natural image background\n places_img_file = pickle.load(open(os.path.join(DATA_DIR, 'places_img_file.pkl'), 'rb'))\n choices = places_img_file['stadium/baseball']\n img_ids = [0, 9, 10, 12, 15, 16, 17, 19, 20, 21, 24, 25, 26, 27, 28, 33, 34, 35, 37, 39, 41, 42, 43, 45, 46, 47, 49, 50, 51, 52, 55, 56, 58, 64, 65, 58, 68, 71, 74, 78, 86, 88, 90, 91, 92, 93]\n #choices = places_img_file['bamboo_forest'] \n #img_ids = [0, 1, 2, 3, 4, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20, 21, 25, 28, 37, 44, 47, 57, 59, 65, 68, 69, 72, 75, 77, 85, 93, 96, 98, 99]\n img_dir = os.path.join(DATA_DIR, 'val_256')\n img = Image.open(os.path.join(img_dir, choices[np.random.choice(img_ids)]))\n img = img.resize((64,64))\n enhancer = ImageEnhance.Brightness(img)\n img = enhancer.enhance(0.7)\n img = np.array(img)\n im[:,:,:] = img\n else:\n im[:, :, 0] = 255 * v\n im[:, :, 2] = 255 * (1 - v)\n return im\n\n# Add a square\ndef sticker(im, x_start = 0, y_start = 0, delta = 10, color = [0, 0, 0]):\n im[y_start:y_start + delta, x_start:x_start + delta, :] = color\n return im\n\n# Add text\ndef text(im, text, x, y, color = (0,0,0), size = 20):\n im = Image.fromarray(im)\n draw = ImageDraw.Draw(im)\n font = ImageFont.truetype(\"../data/arial.ttf\", size)\n w, h = font.getsize(text)\n draw.text((x, y), text, color, font = font)\n im = np.array(im)\n return im, (w,h)\n\ndef vec2im(features, **kwargs):\n \"\"\"\n Convert a feature into an image with certain features.\n \n :return image, text location, small box location\n \"\"\"\n im = np.zeros((64, 64, 3), dtype = np.uint8)\n \n im = shade(im, features[4])\n \n if features[5] == 1: # place large box in the image\n if 'x_start' in kwargs.keys():\n patch_x = kwargs['x_start']\n else:\n patch_x = 0\n if 'y_start' in kwargs.keys():\n patch_y = kwargs['y_start']\n else:\n patch_y = 0\n if 'p_color' in kwargs.keys():\n p_color = kwargs['p_color']\n else:\n p_color = [0,0,0]\n\n if 'p_size' in kwargs.keys():\n p_delta = kwargs['p_size']\n else:\n p_delta = 10\n # Add a large box in the image at the location set in the argument.\n im = sticker(im, x_start=patch_x, y_start=patch_y, color=p_color, delta=p_delta)\n else: # no large box in the image so the locations are set to None\n patch_x = None\n patch_y = None\n \n # Determine the character to be included in the image\n if features[0] == 0:\n char = \"A\"\n elif features[0] == 1:\n char = \"B\"\n elif features[0] == -1:\n char = None # no character\n \n # Determine the color of the character to be included in the image\n if features[3] == 0: # set text color as black\n color = (0, 0, 0)\n elif features[3] == 1: # set text color as green\n color = (0, 255, 0)\n elif features[3] == 3: # set text color as white\n color = (255, 255, 255)\n elif features[3] == 2: # set manual text color from 
(R, G, B) input\n color = kwargs['color']\n textloc = (None, None, None, None)\n \n # Add text if character is not None\n if char != None:\n xstart = int(36 * features[1] + 6)\n ystart = int(36 * features[2] + 6)\n im, dim = text(im, char, xstart, ystart, color = color)\n # keep the location of the character to return\n textloc = (ystart, ystart+dim[1], xstart, xstart+dim[0])\n \n # Add a small box if switch argument is turned on\n dist_ = 300\n if kwargs['switch'] == 1: # small box at a fixed location\n switch_x = 58\n switch_y = 58\n im = sticker(im, x_start=58, y_start=58, delta=4, color=kwargs['s_color'])\n elif kwargs['switch'] == 2: # small box at a random location\n while True:\n switch_x = np.random.random_integers(0, 53)\n switch_y = np.random.random_integers(0, 53)\n # prevent overlap of switch with text and larger box\n if patch_x is not None and features[0] != -1: # yes patch, yes character\n if (int(36*features[1]) - switch_x)**2 + (int(36*features[2]) - switch_y)**2 > dist_+100 and \\\n (patch_x - switch_x)**2 + (patch_y - switch_y)**2 > dist_:\n break\n elif patch_x is None and features[0] != -1: # no patch, yes character\n if (int(36*features[1]) - switch_x)**2 + (int(36*features[2]) - switch_y)**2 > dist_+100:\n break \n elif patch_x is not None and features[0] == -1: # yes patch, no character\n if (patch_x - switch_x)**2 + (patch_y - switch_y)**2 > dist_:\n break\n elif patch_x is None and features[0] == -1: # neither\n break\n im = sticker(im, x_start=switch_x, y_start=switch_y, delta=4, color=kwargs['s_color'])\n else:\n switch_x = None\n switch_y = None\n \n # keep the small box location to return\n if switch_x is not None:\n switch_loc = (switch_y, switch_y+4, switch_x, switch_x+4)\n else:\n switch_loc = (None, None, None, None)\n \n # return the image generated, character location, and small box location\n return im, textloc, switch_loc\n\ndef save_data(exp_no, save_dir, train_data, test_data, train_coord, train_avoid, train_avoid2, test_coord, test_avoid, test_avoid2, save=True):\n # setup bbox info to save to the file\n fname = os.path.join(save_dir, 'textbox_%0.2f.npz'%exp_no)\n # NOTE need to specify below based on different type of experiments\n if exp_no in [1.11, 2.11]: # for simple FR and NR, only one object to include\n gt_flag = [1,0,0]\n elif exp_no == 1.2: # for complex-FR, there are two ground-truth objects to include\n gt_flag = [1,0,1]\n elif exp_no >= 3.7: # for complex-CR, there are two ground-truth objects to inlcude\n gt_flag = [1,0,1]\n train_primary, train_secondary = setup_bboxes(train_coord, train_avoid, train_avoid2, np.array(range(train_data.X.shape[0])), gt_flag=gt_flag)\n test_primary, test_secondary = setup_bboxes(test_coord, test_avoid, test_avoid2, np.array(range(test_data.X.shape[0])), gt_flag=gt_flag)\n if save:\n np.savez(open(fname, 'wb'),\n x_train=train_data.X, \n y_train=train_data.y, \n x_test=test_data.X, \n y_test=test_data.y, \n train_primary=train_primary,\n test_primary=test_primary,\n train_secondary=train_secondary,\n test_secondary=test_secondary)\n return train_data, test_data, train_primary, train_secondary, test_primary, test_secondary\n \ndef load_data(exp_no, load_dir): \n fname = os.path.join(load_dir, 'textbox_%0.2f.npz'%exp_no)\n tmp = np.load(open(fname, 'rb'), allow_pickle=True)\n train_data = TextBoxDataset(tmp['x_train'], tmp['y_train'])\n test_data = TextBoxDataset(tmp['x_test'], tmp['y_test'])\n train_primary = tmp['train_primary']\n test_primary = tmp['test_primary']\n train_secondary = 
tmp['train_secondary']\n test_secondary = tmp['test_secondary']\n return train_data, test_data, train_primary, train_secondary, test_primary, test_secondary\n \n# Generate text data with spurious features\n# make the labels to be correlated with color, not the digit itself\n# or the other way\ndef sample_uniform():\n feature = np.zeros((6))\n feature[0] = np.random.randint(2) #character\n feature[1] = np.random.uniform() #x\n feature[2] = np.random.uniform() #y\n feature[3] = 0\n feature[4] = np.random.uniform() # shade\n feature[5] = 0\n return feature\n\ndef generate_data(n=10000):\n #plain data\n rep = np.zeros((n, 6))\n labels = np.zeros(n)\n im = np.zeros((n, 64, 64, 3))\n for i in range(n):\n rep[i] = sample_uniform()\n im[i] = vec2im(rep[i])\n labels[i] = int(rep[i][0])\n im = np.float32(im / 255)\n return im, labels, rep\n\ndef original_textbox_data(n=10000, save=True, save_dir='data'):\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n fname = os.path.join(save_dir, 'textbox_original.npz')\n if os.path.exists(fname):\n tmp = np.load(open(fname, 'rb'))\n im = tmp['x_train']\n labels = tmp['y_train']\n im_test = tmp['x_test']\n labels_test = tmp['y_test']\n else:\n # train data\n im, labels, rep = generate_data(n=n)\n # val data\n test_n = int(n * 0.3)\n im_test, labels_test, rep_test = generate_data(n=test_n)\n\n train_data = TextBoxDataset(im, labels)\n test_data = TextBoxDataset(im_test, labels_test)\n\n if save:\n np.savez(open(fname, 'wb'), x_train=train_data.X, y_train=train_data.y, x_test=test_data.X, y_test=test_data.y)\n np.savez(open(os.path.join(save_dir, 'textbox_original_meta.npz'), 'wb'), rep=rep, rep_test=rep_test)\n\n return train_data, test_data\n" ]
[ [ "numpy.array", "numpy.random.random_integers", "numpy.random.choice", "numpy.zeros", "numpy.float32", "numpy.random.uniform", "numpy.random.randint", "numpy.random.random" ] ]
leschzinerlab/myami-3.2-freeHand
[ "974b8a48245222de0d9cfb0f433533487ecce60d" ]
[ "appion/bin/edCenterStack.py" ]
[ "#!/usr/bin/env python\n\n#python\nimport os\nimport numpy\nfrom scipy import ndimage\n#appion\nfrom appionlib import appionScript\nfrom appionlib import apStack\nfrom appionlib import apDisplay\nfrom appionlib import apImagicFile\nfrom pyami import correlator, peakfinder\n\nclass centerStackScript(appionScript.AppionScript):\n\t#=====================\n\tdef setupParserOptions(self):\n\t\tself.parser.set_usage(\"Usage: %prog --stack-id=ID [options]\")\n\t\tself.parser.add_option(\"-s\", \"--stack-id\", dest=\"stackid\", type=\"int\",\n\t\t\thelp=\"Stack database id\", metavar=\"ID\")\n\t\tself.parser.add_option(\"-x\", \"--maxshift\", dest=\"maxshift\", type=\"int\",\n\t\t\thelp=\"Maximum shift\")\n\n\t#=====================\n\tdef checkConflicts(self):\n\t\tif self.params['stackid'] is None:\n\t\t\tapDisplay.printError(\"stackid was not defined\")\n\t\tif self.params['description'] is None:\n\t\t\tapDisplay.printError(\"substack description was not defined\")\n\t\tif self.params['runname'] is None:\n\t\t\tapDisplay.printError(\"new stack name was not defined\")\n\t\t\n\n\t#=====================\n\tdef setRunDir(self):\n\t\tstackdata = apStack.getOnlyStackData(self.params['stackid'], msg=False)\n\t\tpath = stackdata['path']['path']\n\t\tuppath = os.path.dirname(os.path.abspath(path))\n\t\t# add mask & maxshift to rundir if specifie\n\t\tif self.params['mask'] is not None:\n\t\t\tself.params['runname'] = self.params['runname']+\"_\"+str(self.params['mask'])\n\t\tif self.params['maxshift'] is not None:\n\t\t\tself.params['runname'] = self.params['runname']+\"_\"+str(self.params['maxshift'])\n\t\tself.params['rundir'] = os.path.join(uppath, self.params['runname'])\n\t\t\n\t#=====================\n\tdef centerParticles(self, oldstack, centerstack, badstack):\n\t\tmaxshift = self.params['maxshift']\n\t\tcenterparts = []\n\t\tbadparts = []\n\t\tkeeplist = []\n\t\ti = 0\n\t\twhile partnum < numparts:\n\t\t\t### if need more particles\n\t\t\t\t### read 4000 parts from oldstack\n\t\t\t\t### write centerparts to centerstack\n\t\t\t\t### write badparts to badstack\n\n\t\t\t### set current image\n\t\t\toldpart = oldparts[i]\n\n\t\t\t### mirror about x\n\t\t\txmirror = numpy.flipud(oldpart)\n\t\t\t### cross-correlate\n\t\t\txcc = correlator.cross_correlate(oldpart, xmirror)\n\t\t\t### find peak\n\t\t\tpeakdict = peakfinder.findSubpixelPeak(xcc)\n\t\t\txpeak = correlator.wrap_coord(peakdict['pixel peak'], xcc.shape)\n\n\t\t\t### mirror about y\n\t\t\tymirror = numpy.fliplr(oldpart)\n\t\t\t### cross-correlate\n\t\t\tycc = correlator.cross_correlate(oldpart, ymirror)\n\t\t\t### find peak\n\t\t\tpeakdict = peakfinder.findSubpixelPeak(ycc)\n\t\t\typeak = correlator.wrap_coord(peakdict['pixel peak'], ycc.shape)\n\n\t\t\t### mirror about y then x\n\t\t\txymirror = numpy.flipud(ymirror)\n\t\t\t### cross-correlate\n\t\t\txycc = correlator.cross_correlate(oldpart, xymirror)\n\t\t\t### find peak\n\t\t\tpeakdict = peakfinder.findSubpixelPeak(xycc)\n\t\t\txypeak = correlator.wrap_coord(peakdict['pixel peak'], xycc.shape)\n\n\t\t\t### do some math to get shift\n\t\t\txshift = (ypeak[0] + xypeak[0])/4.0\n\t\t\tyshift = (xpeak[0] + xypeak[0])/4.0\n\n\t\t\t### shift particle, by integers only\n\t\t\tif xshift < maxshift and yshift < maxshift:\n\t\t\t\txyshift = (xshift, yshift)\n\t\t\t\tcenterpart = ndimage.shift(oldpart, shift=xyshift, mode='wrap', order=0)\n\t\t\t\tcenterparts.append(centerpart)\n\t\t\t\tkeeplist.append(partnum)\n\t\t\telse:\n\t\t\t\tbadparts.append(oldpart)\t\t\n\t\treturn 
keeplist\n\n\t#=====================\n\tdef start(self):\n\t\t### new stack path\n\t\tstackdata = apStack.getOnlyStackData(self.params['stackid'])\n\t\toldstack = os.path.join(stackdata['path']['path'], stackdata['name'])\n\n\t\t### checks\n\t\tcenterstack = os.path.join(self.params['rundir'], 'align.img')\n\t\tbadstack = os.path.join(self.params['rundir'], 'bad.img')\n\t\tapStack.checkForPreviousStack(centerstack)\n\n\t\t### run centering algorithm\n\t\tkeeplist = self.centerParticles(oldstack, centerstack, badstack)\n\t\tif not os.path.isfile(centerstack):\n\t\t\tapDisplay.printError(\"No stack was created\")\n\n\t\tself.params['keepfile'] = os.path.join(self.params['rundir'], 'keepfile.txt')\n\n\t\t### get number of particles\n\t\tself.params['description'] += (\n\t\t\t(\" ... %d eman centered substack id %d\" \n\t\t\t% (numparticles, self.params['stackid']))\n\t\t)\n\t\t\n\t\tapStack.commitSubStack(self.params, newname=os.path.basename(centerstack), centered=True)\n\t\tapStack.averageStack(stack=centerstack)\n\t\tif os.path.isfile(badstack):\n\t\t\tapStack.averageStack(stack=badstack, outfile='badaverage.mrc')\n\n#=====================\nif __name__ == \"__main__\":\n\tcenStack = centerStackScript()\n\tcenStack.start()\n\tcenStack.close()\n\n" ]
[ [ "numpy.flipud", "scipy.ndimage.shift", "numpy.fliplr" ] ]
saeed-abdul-rahim/simple_connect
[ "c1d6945f5116825d9635915793f5ebbb488ef413" ]
[ "build/lib/simple_connect/connect.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 10 15:20:03 2018\r\n\r\n@author: Saeed\r\n\"\"\"\r\n\r\nimport httplib2\r\nimport os\r\nimport json\r\nimport oauth2client\r\nfrom oauth2client import file, client, tools\r\nimport base64\r\nfrom email import encoders\r\nimport mimetypes\r\nfrom email.mime.audio import MIMEAudio\r\nfrom email.mime.base import MIMEBase\r\nfrom email.mime.image import MIMEImage\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.application import MIMEApplication\r\nimport pandas as pd\r\nimport pymysql as db\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.sql import text\r\nfrom sshtunnel import SSHTunnelForwarder\r\nfrom googleapiclient.discovery import build\r\nfrom googleapiclient import errors, discovery\r\nfrom googleapiclient.http import MediaIoBaseDownload\r\nfrom httplib2 import Http\r\nimport io\r\nimport boto3\r\nfrom tqdm import tqdm\r\n\r\nclass Common(object):\r\n \r\n def __init__(self,sql_serverhost,sql_user,sql_password,database):\r\n self.sql_serverhost = sql_serverhost\r\n self.sql_user=sql_user\r\n self.sql_password=sql_password\r\n self.database=database\r\n \r\n def create_sq_string(col_set, sep):\r\n cols=\"\"\r\n for i, col in enumerate(col_set):\r\n cols = cols+ col + \" = :\"+col\r\n if not i==(len(col_set)-1):\r\n cols = cols+\" \"+sep+\" \"\r\n return cols\r\n \r\n def update_main(self,df,conn,table_name,set_cols,where_cols):\r\n dict_data = df.to_dict('records')\r\n dict_data=tuple(dict_data)\r\n set_columns=Common.create_sq_string(set_cols, \",\")\r\n where_columns=Common.create_sq_string(where_cols, \"AND\")\r\n stmt=\"UPDATE \"+table_name+\" SET \"+set_columns+\" WHERE \"+where_columns\r\n stmt=text(stmt)\r\n for line in tqdm(dict_data):\r\n conn.execute(stmt, **line) \r\n \r\n def delete_main(self,df,conn,table_name,where_cols):\r\n dict_data = df.to_dict('records')\r\n dict_data=tuple(dict_data)\r\n where_columns=Common.create_sq_string(where_cols, \"AND\")\r\n stmt=\"DELETE FROM \"+table_name+\" WHERE \"+where_columns\r\n stmt=text(stmt)\r\n for line in tqdm(dict_data):\r\n conn.execute(stmt, **line) \r\n\r\nclass Connect(Common):\r\n \r\n def __init__(self,credentials,database):\r\n cred_dir=os.path.join(os.path.expanduser('~'),'.credentials')\r\n cred_file=os.path.join(cred_dir,credentials)\r\n with open(cred_file) as f:\r\n cred=json.load(f)\r\n sql_serverhost=cred['SQL_HOST']\r\n sql_user=cred['SQL_USER']\r\n sql_password=cred['SQL_PASSWORD']\r\n self.common=Common(sql_serverhost,sql_user,sql_password,database)\r\n self.mydb=create_engine('mysql+pymysql://' + sql_user + ':' + sql_password + '@' + sql_serverhost + ':' + str(3306) + '/' + database , echo=False)\r\n \r\n def to_db(self,data,table):\r\n data.to_sql(name=table, con=self.mydb, if_exists = 'append', index=False, chunksize=5000)\r\n \r\n def query(self,q):\r\n return pd.read_sql_query(q, self.mydb)\r\n \r\n def update_table(self,df,table_name,set_cols,where_cols):\r\n self.common.update_main(df,self.mydb,table_name,set_cols,where_cols)\r\n \r\n def delete_row(self,df,table_name,where_cols):\r\n self.common.delete_main(df,self.mydb,table_name,where_cols)\r\n \r\nclass BastionConnect(Common):\r\n \r\n def __init__(self, credentials, database, pem_path=None):\r\n cred_dir=os.path.join(os.path.expanduser('~'),'.credentials')\r\n cred_file=os.path.join(cred_dir,credentials)\r\n with open(cred_file) as f:\r\n cred=json.load(f) \r\n ssh_username=cred['SSH_USERNAME']\r\n 
ssh_password=cred['SSH_PASSWORD']\r\n sql_serverhost=cred['SQL_HOST']\r\n sql_user=cred['SQL_USER']\r\n sql_password=cred['SQL_PASSWORD']\r\n self.bastion_host = cred['BASTION_HOST']\r\n self.localhost = '127.0.0.1'\r\n self.ssh_username = ssh_username\r\n self.ssh_password=ssh_password\r\n self.common=Common(sql_serverhost,sql_user,sql_password,database)\r\n if pem_path:\r\n self.server = SSHTunnelForwarder(\r\n (self.bastion_host, 22),\r\n ssh_username=self.ssh_username,\r\n ssh_private_key=pem_path,\r\n remote_bind_address=(self.common.sql_serverhost, 3306))\r\n else:\r\n self.server = SSHTunnelForwarder(\r\n (self.bastion_host, 22),\r\n ssh_username=self.ssh_username,\r\n ssh_password=self.ssh_password,\r\n remote_bind_address=(self.common.sql_serverhost, 3306))\r\n self.conn=None\r\n self.mydb=None\r\n \r\n def start_conn(self):\r\n self.conn = db.connect(host=self.localhost,\r\n port=self.server.local_bind_port,\r\n user=self.common.sql_user,\r\n passwd=self.common.sql_password,\r\n db=self.common.database)\r\n self.mydb=create_engine('mysql+pymysql://' + self.common.sql_user + ':' + self.common.sql_password + '@' + self.localhost + ':' + str(self.server.local_bind_port) + '/' + self.common.database , echo=False)\r\n \r\n def query(self,q):\r\n self.server.start()\r\n self.start_conn()\r\n df=pd.read_sql_query(q, self.conn)\r\n self.server.stop()\r\n return df \r\n\r\n def to_db(self,df,table):\r\n self.server.start()\r\n self.start_conn()\r\n df.to_sql(name=table, con=self.mydb, if_exists = 'append', index=False, chunksize=5000)\r\n self.server.stop()\r\n \r\n def update_table(self,df,table_name,set_cols,where_cols):\r\n self.server.start()\r\n self.start_conn()\r\n self.common.update_main(df,self.mydb,table_name,set_cols,where_cols) \r\n self.server.stop()\r\n \r\n def delete_row(self,df,table_name,where_cols):\r\n self.server.start()\r\n self.start_conn()\r\n self.common.delete_main(df,self.mydb,table_name,where_cols) \r\n self.server.stop()\r\n\r\n\r\nclass Gdrive:\r\n \r\n SCOPES = 'https://www.googleapis.com/auth/drive'\r\n \r\n def __init__(self, credential_file_json, SCOPES=SCOPES):\r\n \r\n credential_file_json=credential_file_json.replace('.json','')\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, credential_file_json+'-gdrive.json')\r\n \r\n self.store = oauth2client.file.Storage(credential_path)\r\n self.creds = self.store.get()\r\n if not self.creds or self.creds.invalid:\r\n CLIENT_SECRET_FILE = credential_file_json+'.json'\r\n APPLICATION_NAME = 'Google Drive API Python'\r\n self.flow = client.flow_from_clientsecrets(os.path.join(os.getcwd(),CLIENT_SECRET_FILE), SCOPES)\r\n self.flow.user_agent = APPLICATION_NAME\r\n self.creds = tools.run_flow(self.flow, self.store)\r\n self.service = build('drive', 'v3', http=self.creds.authorize(Http()))\r\n self.items=[]\r\n self.folder=''\r\n \r\n def get_files(self, folder):\r\n \r\n self.folder=folder\r\n service = self.service\r\n results = service.files().list( \r\n fields=\"nextPageToken, files(id, name)\",\r\n q=\"'\"+self.folder+\"' in parents\").execute()\r\n self.items = results.get('files', [])\r\n \r\n return self.items\r\n \r\n def download_files(self, folder):\r\n \r\n items=self.get_files(folder)\r\n for item in items:\r\n print('{0} ({1})'.format(item['name'], item['id']))\r\n file_name = item['name']\r\n file_id = item['id']\r\n request = 
self.service.files().get_media(fileId=file_id)\r\n fh = io.FileIO(file_name, 'wb')\r\n downloader = MediaIoBaseDownload(fh, request)\r\n done = False\r\n while done is False:\r\n status, done = downloader.next_chunk()\r\n print(\"Download %d%%.\" % int(status.progress() * 100))\r\n\r\nclass Gmail:\r\n \r\n def __init__(self, credential_file_json):\r\n \r\n credential_file_json=credential_file_json.replace('.json','')\r\n home_dir = os.path.expanduser('~') \r\n credential_dir = os.path.join(home_dir, '.credentials') \r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir) \r\n credential_path = os.path.join(credential_dir, credential_file_json+'-gmail.json')\r\n \r\n store = oauth2client.file.Storage(credential_path)\r\n self.credentials = store.get()\r\n if not self.credentials or self.credentials.invalid:\r\n CLIENT_SECRET_FILE = credential_file_json+'.json'\r\n APPLICATION_NAME = 'Gmail API Python Send Email'\r\n SCOPES = 'https://www.googleapis.com/auth/gmail.send'\r\n flow = client.flow_from_clientsecrets(os.path.join(os.getcwd(),CLIENT_SECRET_FILE), SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n self.credentials = tools.run_flow(flow, store)\r\n \r\n def create_message_and_send(self,sender, to, subject, message_text_html, image=None, attached_file=None):\r\n \r\n credentials = self.credentials\r\n http = httplib2.Http()\r\n http = credentials.authorize(http)\r\n service = discovery.build('gmail', 'v1', http=http)\r\n \r\n message = self.create_message(sender, to, subject, message_text_html, image, attached_file)\r\n self.send_message(service, \"me\", message)\r\n \r\n \r\n def create_message(self,sender, to, subject, message_text_html, image=None, attached_file=None):\r\n \r\n message = MIMEMultipart()\r\n message['Subject'] = subject\r\n message['From'] = sender\r\n message['To'] = to\r\n \r\n table = \"table{color: #333;font-family: Helvetica, Arial, sans-serif;border-collapse: collapse; border-spacing: 0; }\"\r\n tdth=\"\"\"td,th { border: 1px solid transparent;height: 30px; transition: all 0.3s; padding: 6px 12px;}\r\n th { background: #aaccff; font-weight: bold;}\r\n td { text-align: center;}\r\n tr:nth-child(even) td { background: #F1F1F1 !important; } \r\n tr:nth-child(odd) td { background: #FEFEFE !important; }\r\n tr:hover { background: #000 !important; color: #FFF !important; }\"\"\"\r\n head = '<html><head><style>'+table+tdth+'</style></head>'\r\n message_text_html=head+'<body>'+message_text_html+'</body></html>'\r\n \r\n if image==None:\r\n message.attach(MIMEText(message_text_html, 'html'))\r\n else:\r\n message.attach(MIMEText('<p><img src=\"cid:image1\" /></p>'+message_text_html, 'html'))\r\n\r\n image.seek(0)\r\n img = MIMEImage(image.read(), 'png')\r\n img.add_header('Content-Id', '<image1>')\r\n img.add_header(\"Content-Disposition\", \"inline\", filename=\"image1\")\r\n message.attach(img)\r\n \r\n if not attached_file==None:\r\n my_mimetype, encoding = mimetypes.guess_type(attached_file)\r\n \r\n if my_mimetype is None or encoding is not None:\r\n my_mimetype = 'application/octet-stream' \r\n \r\n main_type, sub_type = my_mimetype.split('/', 1)\r\n if main_type == 'text':\r\n print(\"text\")\r\n temp = open(attached_file, 'r') \r\n attachement = MIMEText(temp.read(), _subtype=sub_type)\r\n temp.close()\r\n \r\n elif main_type == 'image':\r\n print(\"image\")\r\n temp = open(attached_file, 'rb')\r\n attachement = MIMEImage(temp.read(), _subtype=sub_type)\r\n temp.close()\r\n \r\n elif main_type == 'audio':\r\n print(\"audio\")\r\n temp = 
open(attached_file, 'rb')\r\n attachement = MIMEAudio(temp.read(), _subtype=sub_type)\r\n temp.close() \r\n \r\n elif main_type == 'application' and sub_type == 'pdf': \r\n temp = open(attached_file, 'rb')\r\n attachement = MIMEApplication(temp.read(), _subtype=sub_type)\r\n temp.close()\r\n \r\n else: \r\n attachement = MIMEBase(main_type, sub_type)\r\n temp = open(attached_file, 'rb')\r\n attachement.set_payload(temp.read())\r\n temp.close()\r\n \r\n encoders.encode_base64(attachement)\r\n filename = os.path.basename(attached_file)\r\n attachement.add_header('Content-Disposition', 'attachment', filename=filename) \r\n message.attach(attachement)\r\n \r\n raw_message_no_attachment = base64.urlsafe_b64encode(message.as_bytes())\r\n raw_message_no_attachment = raw_message_no_attachment.decode()\r\n body = {'raw': raw_message_no_attachment}\r\n return body\r\n \r\n def send_message(self,service, user_id, body):\r\n \r\n try:\r\n (service.users().messages().send(userId=user_id, body=body).execute())\r\n print(\"Sent\")\r\n except errors.HttpError as error:\r\n print (f'An error occurred: {error}')\r\n \r\n\r\nclass S3:\r\n \r\n def __init__(self,aws_access_key_id,aws_secret_access_key,region_name):\r\n self.aws_access_key_id=aws_access_key_id\r\n self.aws_secret_access_key=aws_secret_access_key\r\n self.region_name=region_name \r\n self.client = boto3.client('s3',aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name)\r\n " ]
[ [ "pandas.read_sql_query" ] ]
e-bug/mpre-unmasked
[ "cd12250b58152a558e15a33113bf98d90b88e776" ]
[ "code/vilbert/vilbert/datasets/retreival_dataset.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport json\nfrom typing import Any, Dict, List\nimport random\nimport os\n\nimport torch\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport _pickle as cPickle\n\nfrom pytorch_transformers.tokenization_bert import BertTokenizer\nfrom ._image_features_reader import ImageFeaturesH5Reader\nimport jsonlines\nimport sys\nimport pdb\n\n\ndef assert_eq(real, expected):\n assert real == expected, \"%s (true) vs %s (expected)\" % (real, expected)\n\n\ndef _load_annotations(split, annotations_jsonpath, task, dataroot, clean_datasets):\n\n with jsonlines.open(annotations_jsonpath) as reader:\n\n # Build an index which maps image id with a list of caption annotations.\n entries = []\n imgid2entry = {}\n count = 0\n\n remove_ids = []\n if clean_datasets:\n if task == \"RetrievalCOCO\":\n remove_ids = np.load(\n os.path.join(dataroot, \"cache\", \"coco_test_ids.npy\")\n )\n elif task == \"RetrievalFlickr30k\":\n remove_ids = np.load(\n os.path.join(dataroot, \"cache\", \"flickr_test_ids.npy\")\n )\n remove_ids = [int(x) for x in remove_ids]\n\n for annotation in reader:\n if task == \"RetrievalCOCO\":\n image_id = annotation[\"id\"]\n elif task == \"RetrievalFlickr30k\":\n image_id = int(annotation[\"img_path\"].split(\".\")[0])\n #image_id = annotation[\"id\"]\n if split == \"train\" and int(image_id) in remove_ids:\n continue\n imgid2entry[image_id] = []\n for sentences in annotation[\"sentences\"]:\n entries.append({\"caption\": sentences, \"image_id\": image_id})\n imgid2entry[image_id].append(count)\n count += 1\n\n return entries, imgid2entry\n\n\nclass RetreivalDataset(Dataset):\n def __init__(\n self,\n task: str,\n dataroot: str,\n annotations_jsonpath: str,\n split: str,\n image_features_reader: ImageFeaturesH5Reader,\n gt_image_features_reader: ImageFeaturesH5Reader,\n tokenizer: BertTokenizer,\n bert_model,\n clean_datasets,\n padding_index: int = 0,\n max_seq_length: int = 20,\n max_region_num: int = 37,\n ):\n # All the keys in `self._entries` would be present in `self._image_features_reader`\n\n self._entries, self.imgid2entry = _load_annotations(\n split, annotations_jsonpath, task, dataroot, clean_datasets\n )\n self.image_id_list = [*self.imgid2entry]\n\n self._image_features_reader = image_features_reader\n self._tokenizer = tokenizer\n self.num_labels = 1\n self._split = split\n self._padding_index = padding_index\n self._max_region_num = max_region_num\n self._max_seq_length = max_seq_length\n\n clean_train = \"_cleaned\" if clean_datasets else \"\"\n\n if self._split == \"train\":\n image_info = cPickle.load(\n open(\n os.path.join(dataroot, \"hard_negative\" + clean_train + \".pkl\"), \"rb\"\n )\n )\n for key, value in image_info.items():\n setattr(self, key, value)\n self.train_imgId2pool = {\n imageId: i for i, imageId in enumerate(self.train_image_list)\n }\n\n os.makedirs(os.path.join(dataroot, \"cache\"), exist_ok=True)\n if \"roberta\" in bert_model:\n cache_path = os.path.join(\n dataroot,\n \"cache\",\n task\n + \"_\"\n + split\n + \"_\"\n + \"roberta\"\n + \"_\"\n + str(max_seq_length)\n + clean_train\n + \".pkl\",\n )\n else:\n cache_path = os.path.join(\n dataroot,\n \"cache\",\n task + \"_\" + split + \"_\" + str(max_seq_length) + clean_train + \".pkl\",\n )\n\n if not os.path.exists(cache_path):\n self.tokenize()\n self.tensorize()\n cPickle.dump(self._entries, 
open(cache_path, \"wb\"))\n else:\n print(\"loading entries from %s\" % (cache_path))\n self._entries = cPickle.load(open(cache_path, \"rb\"))\n\n def tokenize(self):\n \"\"\"Tokenizes the captions.\n\n This will add caption_tokens in each entry of the dataset.\n -1 represents nil, and should be treated as padding_idx in embedding.\n \"\"\"\n for entry in self._entries:\n\n tokens = self._tokenizer.encode(entry[\"caption\"])\n tokens = tokens[: self._max_seq_length - 2]\n tokens = self._tokenizer.add_special_tokens_single_sentence(tokens)\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n\n if len(tokens) < self._max_seq_length:\n # Note here we pad in front of the sentence\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding\n\n assert_eq(len(tokens), self._max_seq_length)\n entry[\"token\"] = tokens\n entry[\"input_mask\"] = input_mask\n entry[\"segment_ids\"] = segment_ids\n\n def tensorize(self):\n\n for entry in self._entries:\n token = torch.from_numpy(np.array(entry[\"token\"]))\n entry[\"token\"] = token\n\n input_mask = torch.from_numpy(np.array(entry[\"input_mask\"]))\n entry[\"input_mask\"] = input_mask\n\n segment_ids = torch.from_numpy(np.array(entry[\"segment_ids\"]))\n entry[\"segment_ids\"] = segment_ids\n\n def __getitem__(self, index):\n entry = self._entries[index]\n image_id = entry[\"image_id\"]\n\n features, num_boxes, boxes, _ = self._image_features_reader[image_id]\n\n mix_num_boxes = min(int(num_boxes), self._max_region_num)\n mix_boxes_pad = np.zeros((self._max_region_num, 5))\n mix_features_pad = np.zeros((self._max_region_num, 2048))\n\n image_mask = [1] * (int(mix_num_boxes))\n while len(image_mask) < self._max_region_num:\n image_mask.append(0)\n\n mix_boxes_pad[:mix_num_boxes] = boxes[:mix_num_boxes]\n mix_features_pad[:mix_num_boxes] = features[:mix_num_boxes]\n\n features1 = torch.tensor(mix_features_pad).float()\n image_mask1 = torch.tensor(image_mask).long()\n spatials1 = torch.tensor(mix_boxes_pad).float()\n\n caption1 = entry[\"token\"]\n input_mask1 = entry[\"input_mask\"]\n segment_ids1 = entry[\"segment_ids\"]\n # negative samples.\n # 1: correct one, 2: random caption wrong, 3: random image wrong. 
4: hard image wrong.\n\n while True:\n # sample a random image:\n img_id2 = random.choice(self.image_id_list)\n if img_id2 != image_id:\n break\n\n entry2 = self._entries[random.choice(self.imgid2entry[img_id2])]\n\n features2 = features1\n image_mask2 = image_mask1\n spatials2 = spatials1\n caption2 = entry2[\"token\"]\n input_mask2 = entry2[\"input_mask\"]\n segment_ids2 = entry2[\"segment_ids\"]\n\n # random image wrong\n while True:\n # sample a random image:\n img_id3 = random.choice(self.image_id_list)\n if img_id3 != image_id:\n break\n\n features3, num_boxes3, boxes3, _ = self._image_features_reader[img_id3]\n image_mask3 = [1] * (int(num_boxes3))\n\n mix_num_boxes3 = min(int(num_boxes3), self._max_region_num)\n mix_boxes_pad3 = np.zeros((self._max_region_num, 5))\n mix_features_pad3 = np.zeros((self._max_region_num, 2048))\n\n while len(image_mask3) < self._max_region_num:\n image_mask3.append(0)\n\n mix_boxes_pad[:mix_num_boxes3] = boxes3[:mix_num_boxes3]\n mix_features_pad[:mix_num_boxes3] = features3[:mix_num_boxes3]\n\n features3 = torch.tensor(mix_features_pad).float()\n image_mask3 = torch.tensor(image_mask3).long()\n spatials3 = torch.tensor(mix_boxes_pad).float()\n\n caption3 = caption1\n input_mask3 = input_mask1\n segment_ids3 = segment_ids1\n\n if self._split == \"train\":\n # random hard caption.\n rand_img_id_pool = self.train_hard_pool[self.train_imgId2pool[image_id]]\n pool_img_idx = int(\n rand_img_id_pool[np.random.randint(1, len(rand_img_id_pool))]\n )\n img_id4 = self.train_image_list[pool_img_idx]\n else:\n while True:\n # sample a random image:\n img_id4 = random.choice(self.image_id_list)\n if img_id4 != image_id:\n break\n\n entry4 = self._entries[random.choice(self.imgid2entry[img_id4])]\n\n features4 = features1\n image_mask4 = image_mask1\n spatials4 = spatials1\n caption4 = entry4[\"token\"]\n input_mask4 = entry4[\"input_mask\"]\n segment_ids4 = entry4[\"segment_ids\"]\n\n features = torch.stack([features1, features2, features3, features4], dim=0)\n spatials = torch.stack([spatials1, spatials2, spatials3, spatials4], dim=0)\n image_mask = torch.stack(\n [image_mask1, image_mask2, image_mask3, image_mask4], dim=0\n )\n caption = torch.stack([caption1, caption2, caption3, caption4], dim=0)\n input_mask = torch.stack(\n [input_mask1, input_mask2, input_mask3, input_mask4], dim=0\n )\n segment_ids = torch.stack(\n [segment_ids1, segment_ids2, segment_ids3, segment_ids4], dim=0\n )\n co_attention_mask = torch.zeros((4, self._max_region_num, self._max_seq_length))\n target = 0\n\n return (\n features,\n spatials,\n image_mask,\n caption,\n target,\n input_mask,\n segment_ids,\n co_attention_mask,\n image_id,\n )\n\n def __len__(self):\n return len(self._entries)\n\n\ndef _load_annotationsVal(annotations_jsonpath, task):\n\n with jsonlines.open(annotations_jsonpath) as reader:\n\n # Build an index which maps image id with a list of caption annotations.\n image_entries = {}\n caption_entries = []\n\n for annotation in reader:\n if task == \"RetrievalCOCO\":\n image_id = annotation[\"id\"]\n elif task == \"RetrievalFlickr30k\":\n image_id = int(annotation[\"img_path\"].split(\".\")[0])\n #image_id = annotation[\"id\"]\n\n image_entries[image_id] = 1\n\n for sentences in annotation[\"sentences\"]:\n caption_entries.append({\"caption\": sentences, \"image_id\": image_id})\n\n image_entries = [*image_entries]\n\n return image_entries, caption_entries\n\n\nclass RetreivalDatasetVal(Dataset):\n def __init__(\n self,\n task: str,\n dataroot: str,\n 
annotations_jsonpath: str,\n split: str,\n image_features_reader: ImageFeaturesH5Reader,\n gt_image_features_reader: ImageFeaturesH5Reader,\n tokenizer: BertTokenizer,\n bert_model,\n clean_datasets,\n padding_index: int = 0,\n max_seq_length: int = 20,\n max_region_num: int = 101,\n ):\n # All the keys in `self._entries` would be present in `self._image_features_reader`\n self._image_entries, self._caption_entries = _load_annotationsVal(\n annotations_jsonpath, task\n )\n self._image_features_reader = image_features_reader\n self._tokenizer = tokenizer\n\n self._split = split\n self._padding_index = padding_index\n self._max_region_num = max_region_num\n self._max_seq_length = max_seq_length\n self.num_labels = 1\n\n # cache file path data/cache/train_ques\n # cap_cache_path = \"data/cocoRetreival/cache/val_cap.pkl\"\n # if not os.path.exists(cap_cache_path):\n self.tokenize()\n self.tensorize()\n # cPickle.dump(self._entries, open(cap_cache_path, 'wb'))\n # else:\n # print('loading entries from %s' %(cap_cache_path))\n # self._entries = cPickle.load(open(cap_cache_path, \"rb\"))\n #\n self.features_all = np.zeros((len(self._image_entries), self._max_region_num, 2048))\n self.spatials_all = np.zeros((len(self._image_entries), self._max_region_num, 5))\n self.image_mask_all = np.zeros((len(self._image_entries), self._max_region_num))\n\n for i, image_id in enumerate(self._image_entries):\n features, num_boxes, boxes, _ = self._image_features_reader[image_id]\n\n mix_num_boxes = min(int(num_boxes), self._max_region_num)\n mix_boxes_pad = np.zeros((self._max_region_num, 5))\n mix_features_pad = np.zeros((self._max_region_num, 2048))\n\n image_mask = [1] * (int(mix_num_boxes))\n while len(image_mask) < self._max_region_num:\n image_mask.append(0)\n\n mix_boxes_pad[:mix_num_boxes] = boxes[:mix_num_boxes]\n mix_features_pad[:mix_num_boxes] = features[:mix_num_boxes]\n\n self.features_all[i] = mix_features_pad\n self.image_mask_all[i] = np.array(image_mask)\n self.spatials_all[i] = mix_boxes_pad\n\n sys.stdout.write(\"%d/%d\\r\" % (i, len(self._image_entries)))\n sys.stdout.flush()\n\n self.features_all = torch.Tensor(self.features_all).float()\n self.image_mask_all = torch.Tensor(self.image_mask_all).long()\n self.spatials_all = torch.Tensor(self.spatials_all).float()\n\n def tokenize(self):\n \"\"\"Tokenizes the captions.\n\n This will add caption_tokens in each entry of the dataset.\n -1 represents nil, and should be treated as padding_idx in embedding.\n \"\"\"\n for entry in self._caption_entries:\n tokens = self._tokenizer.encode(entry[\"caption\"])\n tokens = tokens[: self._max_seq_length - 2]\n tokens = self._tokenizer.add_special_tokens_single_sentence(tokens)\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n\n if len(tokens) < self._max_seq_length:\n # Note here we pad in front of the sentence\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding\n\n assert_eq(len(tokens), self._max_seq_length)\n entry[\"token\"] = tokens\n entry[\"input_mask\"] = input_mask\n entry[\"segment_ids\"] = segment_ids\n\n def tensorize(self):\n for entry in self._caption_entries:\n token = torch.from_numpy(np.array(entry[\"token\"])).long()\n entry[\"token\"] = token\n\n input_mask = torch.from_numpy(np.array(entry[\"input_mask\"]))\n entry[\"input_mask\"] = input_mask\n\n segment_ids = torch.from_numpy(np.array(entry[\"segment_ids\"])).long()\n entry[\"segment_ids\"] = segment_ids\n\n def 
__getitem__(self, index):\n\n # we iterate through every caption here.\n caption_idx = int(index / 2)\n image_idx = index % 2\n\n if image_idx == 0:\n image_entries = self._image_entries[:500]\n features_all = self.features_all[:500]\n spatials_all = self.spatials_all[:500]\n image_mask_all = self.image_mask_all[:500]\n\n else:\n image_entries = self._image_entries[500:]\n features_all = self.features_all[500:]\n spatials_all = self.spatials_all[500:]\n image_mask_all = self.image_mask_all[500:]\n\n entry = self._caption_entries[caption_idx]\n caption = entry[\"token\"]\n input_mask = entry[\"input_mask\"]\n segment_ids = entry[\"segment_ids\"]\n\n target_all = torch.zeros(500)\n for i, image_id in enumerate(image_entries):\n if image_id == entry[\"image_id\"]:\n target_all[i] = 1\n\n return (\n features_all,\n spatials_all,\n image_mask_all,\n caption,\n input_mask,\n segment_ids,\n target_all,\n caption_idx,\n image_idx,\n )\n\n def __len__(self):\n return len(self._caption_entries) * 2\n" ]
[ [ "torch.zeros", "numpy.array", "torch.stack", "numpy.zeros", "torch.tensor", "torch.Tensor" ] ]
weders/NeuralFusion
[ "4f0c14f67ad9d2368b68cbeb78c237a6328971e5" ]
[ "dataset/utils/augmentation.py" ]
[ "import numpy as np\nimport random\nimport time\n\n\nimport cv2\n\nfrom scipy.ndimage.filters import median_filter, maximum_filter, uniform_filter\nfrom scipy.ndimage.morphology import binary_dilation, binary_erosion\nfrom scipy.ndimage import generate_binary_structure\n\ndef add_kinect_noise(depth, sigma_fraction=0.05):\n\n r = np.random.uniform(0., 1., depth.shape)\n sign = np.ones(depth.shape)\n sign[r < 0.5] = -1.0\n sigma = sigma_fraction*depth\n magnitude = sigma*(1.0 - np.exp(-0.5*np.power(r, 2)))\n depth += sign*magnitude\n depth[depth < 0] = 0.\n return depth\n\n\ndef add_axial_noise(x, std=0.05, depth_dependency=False, radial_dependency=False):\n\n if radial_dependency is False and depth_dependency is False:\n\n x += np.random.normal(0, scale=std)\n return x\n\n if depth_dependency:\n\n sigma = 0.0012 + 0.0019*np.power((x - 0.4), 2)\n x += np.random.normal(0, scale=sigma)\n return x\n\n\ndef add_random_zeros(x, p=0.9):\n\n mask = np.random.uniform(0, 1, x.shape)\n mask[mask >= p] = 0.0\n mask[mask > 0.0] = 1.0\n\n return np.multiply(x, mask)\n\n\ndef add_lateral_noise(x, focal_length=557, method='gaussian'):\n\n pixels = np.arange(-int(x.shape[1]/2), int(x.shape[1]/2), dtype=np.int32)\n theta = np.arctan(pixels/focal_length)\n\n sigma_l = 0.8 + 0.035*theta/(np.pi/2. - theta)\n\n x += np.random.normal(0, scale=sigma_l)\n return x\n\n\ndef add_depth_noise(depthmaps, noise_sigma, seed):\n\n # add noise\n if noise_sigma > 0:\n random.seed(time.process_time())\n np.random.seed(int(time.process_time()))\n sigma = noise_sigma\n noise = np.random.normal(0, 1, size=depthmaps.shape).astype(np.float32)\n depthmaps = depthmaps + noise * sigma * depthmaps\n\n return depthmaps\n\n\ndef add_lateral_and_axial_noise(x, focal_length):\n\n pixels = np.arange(-int(x.shape[1] / 2), int(x.shape[1] / 2), dtype=np.int32)\n theta = np.arctan(pixels / focal_length)\n\n sigma = 0.0012 + 0.0019*(x - 0-4)**2 + 0.0001/np.sqrt(x)*(theta**2)/(np.pi/2 - theta)**2\n\n x += np.random.normal(0, scale=sigma)\n return x\n\n\ndef add_outliers(x, scale=5, fraction=0.99):\n\n # check for invalid data points\n x[x < 0.] = 0.\n\n\n random.seed(time.process_time())\n np.random.seed(int(time.process_time()))\n\n # filter with probability:\n mask = np.random.uniform(0, 1, x.shape)\n mask[mask >= fraction] = 1.0\n mask[mask < fraction] = 0.0\n mask[x == 0.] = 0.\n\n outliers = np.random.normal(0, scale=scale, size=x.shape)\n x += np.multiply(outliers, mask)\n\n\n x[x < 0.] = 0.\n\n return x\n\n\ndef add_sparse_depth(x, percentage=0.1):\n\n # check for invalid data points\n x[x < 0.] = 0.\n\n random.seed(time.process_time())\n np.random.seed(int(time.process_time()))\n\n # filter with probability:\n mask = np.random.uniform(0, 1, x.shape)\n mask[mask < percentage] = -1\n mask[mask >= percentage] = 0.\n mask[x == 0.] = 0.\n\n x[mask == 0.] = 0.\n\n return x\n\ndef add_gradient_noise(x, xorig):\n\n gx = cv2.Sobel(xorig, cv2.CV_64F, 1, 0, ksize=5)\n gy = cv2.Sobel(xorig, cv2.CV_64F, 0, 1, ksize=5)\n\n grad = np.sqrt(gx ** 2 + gy ** 2)\n\n mask = np.zeros_like(grad)\n mask[grad > 1000] = 1.\n mask = binary_dilation(mask, iterations=4)\n\n noise = np.random.normal(0, 1, size=x.shape).astype(np.float32)\n\n x += np.multiply(mask, noise * 0.03 * x)\n\n return x\n\n\ndef add_outlier_blobs(x, scale=5, fraction=0.9, starti=1, endi=4):\n # check for invalid data points\n x[x < 0.] 
= 0.\n\n random.seed(time.process_time())\n np.random.seed(int(time.process_time()))\n\n # filter with probability:\n mask = np.random.uniform(0, 1, x.shape)\n mask[mask >= fraction] = 1.0\n mask[mask < fraction] = 0.0\n mask[x == 0.] = 0.\n\n\n\n for i in range(starti, endi):\n # filter with probability:\n mask = np.random.uniform(0, 1, x.shape)\n mask[mask <= (1. - fraction) / 3.] = -1.0\n mask[mask > (1. - fraction) / 3.] = 0.0\n mask[mask == -1.] = 1.\n mask[x == 0.] = 0.\n\n\n # dilation\n mask = binary_dilation(mask, iterations=i).astype(np.float)\n\n outliers = np.random.normal(0, scale=scale, size=x.shape)\n\n x += np.multiply(outliers, mask)\n\n x[x < 0.] = 0.\n\n return x\n\n\ndef add_noise_heuristic(x, xclean, scale=3., fraction=0.98):\n # check for invalid data points\n x[x < 0.] = 0.\n x_orig = np.copy(x)\n\n random.seed(time.process_time())\n np.random.seed(int(time.process_time()))\n\n gx = cv2.Sobel(xclean, cv2.CV_64F, 1, 0, ksize=5)\n gy = cv2.Sobel(xclean, cv2.CV_64F, 0, 1, ksize=5)\n\n grad = np.sqrt(gx ** 2 + gy ** 2)\n\n norm = np.count_nonzero(grad)\n thresh = np.sum(grad) / norm\n\n grad_mask = np.zeros_like(grad)\n grad_mask[grad > thresh] = 1.\n grad_mask = binary_erosion(grad_mask, iterations=1)\n\n # print(np.sum(grad_mask))\n\n for i in range(2, 5):\n\n mask = np.zeros_like(x)\n\n # filter with probability:\n sampler = np.random.uniform(0, 1, x.shape)\n mask[sampler <= (1. - fraction) / 3.] = 1.0\n\n mask[x == 0.] = 0.\n mask[grad_mask == 0.] = 0.\n\n # dilation\n mask = binary_dilation(mask, iterations=i).astype(np.float)\n\n outliers = np.random.normal(0, scale=scale, size=(15, 20))\n outliers = np.repeat(outliers, 16, axis=1)\n outliers = np.repeat(outliers, 16, axis=0)\n\n x += np.multiply(outliers, mask)\n \n x[x < 0.] = 0.\n\n return x\n" ]
[ [ "numpy.random.normal", "numpy.zeros_like", "numpy.count_nonzero", "scipy.ndimage.morphology.binary_dilation", "numpy.sum", "numpy.copy", "numpy.ones", "numpy.multiply", "numpy.random.uniform", "numpy.arctan", "numpy.power", "numpy.sqrt", "numpy.repeat", "scipy.ndimage.morphology.binary_erosion" ] ]
qsyao/cudaBERT
[ "c93cb5ff0ccd387294a7229a9bef969c1375d0d6" ]
[ "cuda_model.py" ]
[ "import sys\r\nimport numpy as np\r\n\r\nclass Cuda_BERT(object):\r\n def __init__(self, id_gpu, config):\r\n self.id_gpu = id_gpu\r\n self.max_batchsize = config.batch_size\r\n self.max_seq_length = config.max_seq_length\r\n\r\n sys.path.insert(0, config.cubert_pth)\r\n from pybert import load_model, bert\r\n\r\n if config.is_large:\r\n self.hidden_size = 1024\r\n else:\r\n self.hidden_size = 768\r\n\r\n self.model = load_model(config.is_large, config.model_npy_pth, id_gpu,\\\r\n config.batch_size, config.max_seq_length)\r\n \r\n self.cu_encode = bert\r\n\r\n def encode(self, input_tensor):\r\n '''\r\n Input_tensor:\r\n inputs_id, segments_id, mask:\r\n numpy.array [batchsize, seq_length]\r\n '''\r\n indexed_tokens = input_tensor[0]\r\n segments_ids = input_tensor[1]\r\n attention_mask = input_tensor[2]\r\n batchsize = indexed_tokens.shape[0]\r\n seq_length = indexed_tokens.shape[1]\r\n output = np.ones([batchsize, self.hidden_size]).astype(np.float32)\r\n self.cu_encode(self.model, output, indexed_tokens, segments_ids, \\\r\n batchsize, seq_length, attention_mask)\r\n return output\r\n \r\n def __del__(self):\r\n from pybert import unload_model\r\n unload_model(self.model)\r\n" ]
[ [ "numpy.ones" ] ]
aprieels/3D-watermarking-spectral-decomposition
[ "dcab78857d0bb201563014e58900917545ed4673" ]
[ "dependencies/PyMesh/scripts/scale_mesh.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nScalar input mesh.\n\"\"\"\n\nimport argparse\nimport pymesh\nimport numpy as np\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=__doc__);\n parser.add_argument(\"--scale\", \"-s\", type=float, default=1.0,\n help=\"Uniform scaling factor\");\n parser.add_argument(\"--scale-x\", \"-x\", type=float, default=None,\n help=\"X axis scaling factor\");\n parser.add_argument(\"--scale-y\", \"-y\", type=float, default=None,\n help=\"Y axis scaling factor\");\n parser.add_argument(\"--scale-z\", \"-z\", type=float, default=None,\n help=\"Z axis scaling factor\");\n parser.add_argument(\"input_mesh\", help=\"input mesh\");\n parser.add_argument(\"output_mesh\", help=\"output mesh\");\n return parser.parse_args();\n\ndef main():\n args = parse_args();\n scale = np.ones(3) * args.scale;\n if args.scale_x is not None:\n scale[0] = args.scale_x;\n if args.scale_y is not None:\n scale[1] = args.scale_y;\n if args.scale_z is not None:\n scale[2] = args.scale_z;\n\n mesh = pymesh.load_mesh(args.input_mesh);\n mesh = pymesh.form_mesh(mesh.vertices * scale, mesh.faces, mesh.voxels);\n pymesh.save_mesh(args.output_mesh, mesh);\n\nif __name__ == \"__main__\":\n main();\n" ]
[ [ "numpy.ones" ] ]
edlanglois/akro
[ "bcbeae4f5fd82606956a5dca3d5ef72a22760ce4" ]
[ "tests/akro/test_dict.py" ]
[ "import collections\nimport pickle\nimport unittest\n\nimport numpy as np\n\nfrom akro import Box\nfrom akro import Dict\nfrom akro import Discrete\nfrom akro import tf\nfrom akro import theano\nfrom akro.requires import requires_tf, requires_theano\n\n\nclass TestDict(unittest.TestCase):\n\n def test_pickleable(self):\n motion_dict = {'position': Discrete(2), 'velocity': Discrete(3)}\n sample = {\n 'position': 1,\n 'velocity': 2,\n }\n d = Dict(motion_dict)\n round_trip = pickle.loads(pickle.dumps(d))\n\n assert d.contains(sample)\n assert round_trip\n assert round_trip.contains(sample)\n\n def test_flat_dim(self):\n d = Dict(\n collections.OrderedDict(position=Box(0, 10, (2, )),\n velocity=Box(0, 10, (3, ))))\n assert d.flat_dim == 5\n\n def test_flat_dim_with_keys(self):\n d = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('velocity', Box(0, 10, (3, )))]))\n assert d.flat_dim_with_keys(['position']) == 2\n\n def test_flatten(self):\n d = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('velocity', Box(0, 10, (3, )))]))\n f = np.array([1., 2., 3., 4., 5.])\n # Keys are intentionally in the \"wrong\" order.\n s = collections.OrderedDict([('velocity', np.array([3., 4., 5.])),\n ('position', np.array([1., 2.]))])\n assert (d.flatten(s) == f).all()\n\n def test_unflatten(self):\n d = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('velocity', Box(0, 10, (3, )))]))\n f = np.array([1., 2., 3., 4., 5.])\n # Keys are intentionally in the \"wrong\" order.\n s = collections.OrderedDict([('velocity', np.array([3., 4., 5.])),\n ('position', np.array([1., 2.]))])\n assert all((s[k] == v).all() for k, v in d.unflatten(f).items())\n\n def test_flatten_n(self):\n d = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('velocity', Box(0, 10, (3, )))]))\n f = np.array([[1., 2., 3., 4., 5.], [6., 7., 8., 9., 0.]])\n # Keys are intentionally in the \"wrong\" order.\n s = [\n collections.OrderedDict([('velocity', np.array([3., 4., 5.])),\n ('position', np.array([1., 2.]))]),\n collections.OrderedDict([('velocity', np.array([8., 9., 0.])),\n ('position', np.array([6., 7.]))])\n ]\n assert (d.flatten_n(s) == f).all()\n\n def test_unflatten_n(self):\n d = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('velocity', Box(0, 10, (3, )))]))\n f = np.array([[1., 2., 3., 4., 5.], [6., 7., 8., 9., 0.]])\n # Keys are intentionally in the \"wrong\" order.\n s = [\n collections.OrderedDict([('velocity', np.array([3., 4., 5.])),\n ('position', np.array([1., 2.]))]),\n collections.OrderedDict([('velocity', np.array([8., 9., 0.])),\n ('position', np.array([6., 7.]))])\n ]\n for i, fi in enumerate(d.unflatten_n(f)):\n assert all((s[i][k] == v).all() for k, v in fi.items())\n\n def test_flatten_with_keys(self):\n d = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('velocity', Box(0, 10, (3, )))]))\n f = np.array([3., 4., 5.])\n f_full = np.array([1., 2., 3., 4., 5.])\n # Keys are intentionally in the \"wrong\" order.\n s = collections.OrderedDict([('velocity', np.array([3., 4., 5.])),\n ('position', np.array([1., 2.]))])\n assert (d.flatten_with_keys(s, ['velocity']) == f).all()\n assert (d.flatten_with_keys(s,\n ['velocity', 'position']) == f_full).all()\n\n def test_unflatten_with_keys(self):\n d = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('velocity', Box(0, 10, (3, )))]))\n f = np.array([3., 4., 5.])\n f_full = np.array([1., 2., 3., 4., 5.])\n # Keys are intentionally in the 
\"wrong\" order.\n s = collections.OrderedDict([('velocity', np.array([3., 4., 5.])),\n ('position', np.array([1., 2.]))])\n assert all((s[k] == v).all()\n for k, v in d.unflatten_with_keys(f, ['velocity']).items())\n assert all((s[k] == v).all() for k, v in d.unflatten_with_keys(\n f_full, ['velocity', 'position']).items())\n\n def test_concat(self):\n d1 = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('velocity', Box(0, 10, (3, )))]))\n d2 = Dict(\n collections.OrderedDict([('position', Box(0, 10, (2, ))),\n ('gravity', Box(0, 10, (3, )))]))\n concat_d = d1.concat(d2)\n\n assert (sorted(concat_d.spaces.keys()) == sorted(\n ['position', 'velocity', 'gravity']))\n\n @requires_tf\n def test_convert_tf(self):\n d = Dict({'position': Discrete(2), 'velocity': Discrete(3)})\n tensor_dict = d.to_tf_placeholder('test', 1)\n assert isinstance(tensor_dict, Dict)\n assert all(\n [isinstance(c, tf.Tensor) for c in tensor_dict.spaces.values()])\n assert all([v.dtype == tf.int64 for v in tensor_dict.spaces.values()])\n\n @requires_theano\n def test_convert_theano(self):\n d = Dict({'position': Discrete(2), 'velocity': Discrete(3)})\n tensor_dict = d.to_theano_tensor('test', 1)\n assert isinstance(tensor_dict, Dict)\n assert all([\n isinstance(c, theano.tensor.TensorVariable)\n for c in tensor_dict.spaces.values()\n ])\n assert all(\n [space.dtype == 'int64' for space in tensor_dict.spaces.values()])\n" ]
[ [ "numpy.array" ] ]
nitxiodev/vue-flask-celery-docker-leaflet
[ "0d09239dae668833a66744deff7201586b7b7a47" ]
[ "csp_solver_cloud/src/server/MapService.py" ]
[ "import os\nimport warnings\n\nfrom csp_solver_cloud.src.server.BaseService import BaseService\n\nwarnings.simplefilter(\"ignore\")\nfrom geopy.exc import GeocoderTimedOut, GeocoderServiceError, GeocoderQueryError\nfrom csp_solver_cloud.src.server.Fetcher import Fetcher\nimport geopandas as gp\nfrom csp_solver_cloud.src.map.map import Map\nfrom csp_solver_cloud.src.server import ServiceException, ServiceCodes\nimport pandas as pd\n\n\nclass MapService(BaseService):\n def __init__(self):\n self._geo_data = gp.read_file(\n os.path.join(os.path.dirname(__file__), 'data/ne_10m_admin_1_states_provinces.shp'), encoding='utf-8')\n self._geo_data['admin'] = self._geo_data['admin'].apply(lambda x: x.lower()) # uniform criteria\n self._geolocator = Fetcher()\n\n def solve(self, latitude, longitude, colors):\n if latitude is None or longitude is None or colors is None:\n raise ServiceException(ServiceCodes.EMPTY_PARAMS, msg='Empty parameters')\n\n try:\n colors = [color for color in xrange(1, int(colors) + 1)]\n except (TypeError, ValueError) as f:\n raise ServiceException(ServiceCodes.BAD_PARAMS, msg=f.message)\n\n try:\n input_data = self._geolocator.resolve(latitude, longitude)\n except (GeocoderTimedOut, GeocoderServiceError, GeocoderQueryError) as e:\n raise ServiceException(ServiceCodes.FAIL, msg=e.message)\n\n if not input_data:\n raise ServiceException(ServiceCodes.FAIL,\n msg='No country found with these coordinates ({},{})'.format(latitude, longitude))\n\n map = Map(self._geo_data, input_data, colors, 'mrv', 'lcv')\n if map.backtracking_search():\n return self._build_json_response(map)\n\n return None\n\n def _build_json_response(self, map):\n df = pd.DataFrame.from_dict(map.variables, orient='index')\n if not df.empty:\n df.reset_index(0, inplace=True)\n df.columns = ['province', 'color']\n\n s_geo = map.geo_data.merge(df, left_on=map.geo_data.gn_name, right_on='province')\n\n return s_geo[['geometry', 'province', 'color']].to_json()\n\n return None\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
PlaytikaResearch/abexp
[ "7f04e0fe29be6b027c84f670f4d09939b50f8eca" ]
[ "tests/test_analysis_bayesian.py" ]
[ "# MIT License\n# \n# Copyright (c) 2021 Playtika Ltd.\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\nimport pandas as pd\n\nfrom abexp.core.analysis_bayesian import BayesianAnalyzer, BayesianGLMAnalyzer\n\n\ndef test_bayesian_compare_mean():\n \"\"\" Test compare means from statistics. \"\"\"\n\n # Generate random samples\n np.random.seed(42)\n obs1 = np.random.normal(490, 200, 1000)\n obs2 = np.random.normal(500, 200, 1000)\n\n analyzer = BayesianAnalyzer()\n\n prob, lift, diff_means, ci = analyzer.compare_mean(obs_contr=obs1, obs_treat=obs2)\n\n assert (type(prob) == np.float64), 'Error prob'\n assert (type(lift) == np.float64), 'Error lift'\n assert (type(diff_means) == np.float64), 'Error diff_means'\n assert (type(ci[0]) == np.float64), 'Error'\n assert (type(ci[1]) == np.float64), 'Error'\n # TODO: check value\n\n\ndef test_bayesian_compare_conv():\n \"\"\" Test compare conversions from statistics. \"\"\"\n\n # Results based on the online calculator at\n # https://marketing.dynamicyield.com/bayesian-calculator/ for probability\n # https://abtestguide.com/bayesian/ for lift\n\n data = {'conv1': [300, 500, 3000, 100, 700],\n 'conv2': [370, 450, 3200, 100, 100],\n 'nobs1': [700, 5000, 10000, 980, 980],\n 'nobs2': [800, 5000, 10000, 980, 980],\n 'prob': [0.9058, 0.0431, 0.9989, 0.50, 0],\n 'lift': [0.0792, -0.10, 0.0667, 0, -0.857]}\n\n # Set epsilons\n eps = 0.005\n\n analyzer = BayesianAnalyzer()\n\n for i in np.arange(len(data['prob'])):\n prob, lift = analyzer.compare_conv(conv_contr=data['conv1'][i],\n conv_treat=data['conv2'][i],\n nobs_contr=data['nobs1'][i],\n nobs_treat=data['nobs2'][i])\n\n assert (data['prob'][i] - eps < prob < data['prob'][i] + eps), \\\n 'Error in compare_conv computed prob={}, expected_prob={}'.format(prob, data['prob'][i])\n\n assert (data['lift'][i] - eps < lift < data['lift'][i] + eps), \\\n 'Error in compare_conv computed lift={}, expected_lift={}'.format(lift, data['lift'][i])\n\n\ndef test_bayesian_multivariate_glm():\n \"\"\" Test bayesiann GLM with multivariate regression. 
\"\"\"\n\n analyzer = BayesianGLMAnalyzer()\n\n df1 = pd.DataFrame([[1, 4, 35],\n [0, 4, 5],\n [1, 3, 28],\n [0, 1, 5],\n [0, 2, 1],\n [1, 0, 1.5]], columns=['group', 'level', 'kpi'])\n\n df2 = pd.DataFrame([[0, 0, 100],\n [0, 1, 100],\n [0, 0, 100],\n [1, 0, 100],\n [1, 1, 100],\n [1, 0, 100]], columns=['group', 'level', 'kpi'])\n\n stats = analyzer.multivariate_regression(df1, 'kpi')\n assert (type(stats) == pd.DataFrame), 'Error'\n # TODO: check value\n\n stats = analyzer.multivariate_regression(df2, 'kpi')\n assert (type(stats) == pd.DataFrame), 'Error'\n # TODO: check value\n\n\ndef test_bayesian_hierarchical_glm():\n \"\"\" Test bayesiann GLM with hierarchical regression. \"\"\"\n\n analyzer = BayesianGLMAnalyzer()\n\n df1 = pd.DataFrame([[0, 5, 'Italy'],\n [0, 5, 'Italy'],\n [0, 100, 'Switzerland'],\n [1, 100, 'Italy'],\n [1, 100, 'Italy'],\n [1, 100, 'France']], columns=['group', 'kpi', 'country'])\n\n stats = analyzer.hierarchical_regression(df1, group_col='group', cat_col='country', kpi_col='kpi')\n assert (type(stats) == pd.DataFrame), 'Error'\n # TODO: check value\n" ]
[ [ "numpy.random.seed", "numpy.random.normal", "pandas.DataFrame" ] ]
0xangelo/nnrl
[ "c925af1c6ecc6e2e999b782935f7e2c7dee1ba81" ]
[ "nnrl/nn/distributions/flows/abstract.py" ]
[ "\"\"\"Distribution transforms as PyTorch modules compatible with TorchScript.\"\"\"\nfrom typing import Dict\n\nimport torch\nfrom torch import nn\n\nfrom .utils import sum_rightmost\n\n\nclass Transform(nn.Module):\n \"\"\"A diffeomorphism.\n\n Transforms are differentiable bijections with tractable Jacobians.\n All transforms map samples from a latent space to another (f(z) -> x)\n Use the `reverse` flag to invert the transformation (f^{-1}(x) -> z).\n \"\"\"\n\n params: Dict[str, torch.Tensor]\n\n def __init__(self, *, cond_transform=None, params=None, event_dim=0):\n super().__init__()\n self.event_dim = (\n event_dim if cond_transform is None else cond_transform.event_dim\n )\n self.cond_transform = cond_transform\n self.params = params or {}\n for name, param in self.params.items():\n if isinstance(param, nn.Parameter):\n self.register_parameter(name, param)\n else:\n self.register_buffer(name, param)\n\n def forward(self, inputs, reverse: bool = False):\n # pylint:disable=arguments-differ,missing-function-docstring\n return self.decode(inputs) if reverse else self.encode(inputs)\n\n def encode(self, inputs):\n \"\"\"\n Computes the transform `z => x` and the log det jacobian `log |dz/dx|`\n \"\"\"\n\n return self.cond_transform.encode(inputs, self.params)\n\n def decode(self, inputs):\n \"\"\"\n Inverts the transform `x => z` and the log det jacobian `log |dx/dz|`,\n or `- log |dz/dx|`.\n \"\"\"\n\n return self.cond_transform.decode(inputs, self.params)\n\n\nclass ConditionalTransform(nn.Module):\n \"\"\"A Transform conditioned on some external variable(s).\"\"\"\n\n def __init__(self, *, transform=None, event_dim=0):\n super().__init__()\n self.event_dim = event_dim if transform is None else transform.event_dim\n self.transform = transform\n\n def forward(self, inputs, params: Dict[str, torch.Tensor], reverse: bool = False):\n # pylint:disable=arguments-differ,missing-function-docstring\n return self.decode(inputs, params) if reverse else self.encode(inputs, params)\n\n def encode(self, inputs, params: Dict[str, torch.Tensor]):\n \"\"\"\n Computes the transform `(z, y) => x`.\n \"\"\"\n # pylint:disable=unused-argument\n return self.transform.encode(inputs)\n\n def decode(self, inputs, params: Dict[str, torch.Tensor]):\n \"\"\"\n Inverts the transform `(x, y) => z`.\n \"\"\"\n # pylint:disable=unused-argument\n return self.transform.decode(inputs)\n\n\nclass InverseTransform(ConditionalTransform):\n \"\"\"Invert the transform, effectively swapping the encoding/decoding directions.\"\"\"\n\n def __init__(self, transform):\n super().__init__(event_dim=transform.event_dim)\n self.transform = (\n ConditionalTransform(transform=transform)\n if isinstance(transform, Transform)\n else transform\n )\n\n def encode(self, inputs, params: Dict[str, torch.Tensor]):\n\n return self.transform.decode(inputs, params)\n\n def decode(self, inputs, params: Dict[str, torch.Tensor]):\n\n return self.transform.encode(inputs, params)\n\n\nclass CompositeTransform(ConditionalTransform):\n # pylint:disable=missing-docstring\n\n def __init__(self, transforms, event_dim=None):\n event_dim = event_dim or max(t.event_dim for t in transforms)\n super().__init__(event_dim=event_dim)\n assert self.event_dim >= max(t.event_dim for t in transforms), (\n \"CompositeTransform cannot have an event_dim smaller than any \"\n \"of its components'\"\n )\n transforms = self.unpack(transforms)\n self.transforms = nn.ModuleList(transforms)\n self.inv_transforms = nn.ModuleList(transforms[::-1])\n\n @staticmethod\n def 
unpack(transforms):\n \"\"\"Recursively unfold CompositeTransforms in a list.\"\"\"\n result = []\n for trans in transforms:\n if isinstance(trans, CompositeTransform):\n result.extend(trans.unpack(trans.transforms))\n elif isinstance(trans, Transform):\n result += [ConditionalTransform(transform=trans)]\n else:\n result += [trans]\n return result\n\n def encode(self, inputs, params: Dict[str, torch.Tensor]):\n out = inputs\n log_abs_det_jacobian = 0.0\n for transform in self.transforms:\n out, log_det = transform(out, params, reverse=False)\n log_abs_det_jacobian += sum_rightmost(\n log_det, self.event_dim - transform.event_dim\n )\n return out, log_abs_det_jacobian\n\n def decode(self, inputs, params: Dict[str, torch.Tensor]):\n out = inputs\n log_abs_det_jacobian = 0.0\n for transform in self.inv_transforms:\n out, log_det = transform(out, params, reverse=True)\n log_abs_det_jacobian += sum_rightmost(\n log_det, self.event_dim - transform.event_dim\n )\n return out, log_abs_det_jacobian\n" ]
[ [ "torch.nn.ModuleList" ] ]
garanews/cuml
[ "318f521a1d2681f4622a44921d27b5f592fe4407" ]
[ "python/cuml/experimental/hyperopt_utils/plotting_utils.py" ]
[ "#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n\ndef plot_heatmap(df, col1, col2):\n \"\"\"\n Generates a heatmap to highlight interactions\n of two parameters specified in col1 and col2.\n\n Parameters\n ----------\n df : Pandas dataframe\n Results from Grid or Random Search\n col1 : string; Name of the first parameter\n col2: string; Name of the second parameter\n\n Output\n ----------\n A heatmap using seaborn\n \"\"\"\n max_scores = df.groupby([col1, col2]).max()\n max_scores = max_scores.unstack()[['mean_test_score']]\n sns.heatmap(max_scores.mean_test_score, annot=True, fmt='.3g')\n\n\ndef plot_search_results(res):\n \"\"\"\n Plots by fixing all paramters except one parameter to\n its best value using matplotlib.\n\n Accepts results from grid or random search from dask-ml.\n\n Parameters\n ----------\n res : results from Grid or Random Search\n\n Output\n ----------\n As many plots as the parameters that were tuned\n \"\"\"\n # Results from grid search\n results = res.cv_results_\n means_test = results['mean_test_score']\n stds_test = results['std_test_score']\n # Getting indexes of values per hyper-parameter\n masks = []\n masks_names = list(res.best_params_.keys())\n for p_k, p_v in res.best_params_.items():\n masks.append(list(results['param_' + p_k].data == p_v))\n try:\n # Grid Search\n params = res.param_grid\n # Ploting results\n fig, ax = plt.subplots(1, len(params), sharex='none',\n sharey='all', figsize=(20, 5))\n fig.suptitle('Score per parameter')\n fig.text(0.04, 0.5, 'MEAN SCORE', va='center', rotation='vertical')\n pram_preformace_in_best = {}\n for i, p in enumerate(masks_names):\n m = np.stack(masks[:i] + masks[i+1:])\n pram_preformace_in_best\n best_parms_mask = m.all(axis=0)\n best_index = np.where(best_parms_mask)[0]\n x = np.array(params[p])\n y_1 = np.array(means_test[best_index])\n e_1 = np.array(stds_test[best_index])\n ax[i].errorbar(x, y_1, e_1, linestyle='--',\n marker='o', label='test')\n ax[i].set_xlabel(p.upper())\n except Exception as e:\n # Randomized Seach\n print(\"Cannot generate plots because of \", type(e), \"trying again...\")\n try:\n params = res.param_distributions\n # Ploting results\n fig, ax = plt.subplots(1, len(params), sharex='none',\n sharey='all', figsize=(20, 5))\n fig.suptitle('Score per parameter')\n fig.text(0.04, 0.5, 'MEAN SCORE', va='center', rotation='vertical')\n\n for i, p in enumerate(masks_names):\n results = pd.DataFrame(res.cv_results_)\n select_names = masks_names[:i] + masks_names[i+1:]\n for j in select_names:\n best_value = res.best_params_[j]\n results = results[results['param_'+j] == best_value]\n\n x = np.array(results['param_'+p])\n y_1 = np.array(results['mean_test_score'])\n e_1 = np.array(results['std_test_score'])\n ax[i].errorbar(x, y_1, e_1, linestyle='--',\n marker='o', label='test')\n ax[i].set_xlabel(p.upper())\n except Exception as e:\n # Something else 
broke while attempting to plot\n print(\"Cannot generate plots because of \", type(e))\n return\n plt.legend()\n plt.show()\n" ]
[ [ "numpy.array", "pandas.DataFrame", "matplotlib.pyplot.legend", "numpy.where", "numpy.stack", "matplotlib.pyplot.show" ] ]
liqihao2000/shenfun
[ "2164596ccf906242779d9ec361168246ee6214d8" ]
[ "shenfun/la.py" ]
[ "r\"\"\"\nThis module contains linear algebra solvers for SparseMatrixes\n\"\"\"\nimport numpy as np\nimport scipy.sparse as scp\nfrom scipy.sparse.linalg import spsolve, splu\nfrom shenfun.optimization import optimizer\nfrom shenfun.matrixbase import SparseMatrix, extract_bc_matrices, SpectralMatrix\n\n\nclass TDMA:\n \"\"\"Tridiagonal matrix solver\n\n Parameters\n ----------\n mat : SparseMatrix\n Tridiagonal matrix with diagonals in offsets -2, 0, 2\n\n \"\"\"\n # pylint: disable=too-few-public-methods\n\n def __init__(self, mat, neumann=False):\n assert isinstance(mat, SparseMatrix)\n self.mat = mat\n self.dd = np.zeros(0)\n self.neumann = neumann\n if isinstance(mat, SpectralMatrix):\n self.neumann = mat.testfunction[0].use_fixed_gauge\n\n def init(self):\n \"\"\"Initialize and allocate solver\"\"\"\n N = self.mat.shape[0]\n symmetric = self.mat.issymmetric\n self.dd = self.mat[0]*np.ones(N)*self.mat.scale\n self.ud = self.mat[2]*np.ones(N-2)*self.mat.scale\n if self.neumann:\n self.dd[0] = 1\n self.ud[0] = 0\n self.ld = np.zeros(N-2) if symmetric else self.mat[-2]*np.ones(N-2)*self.mat.scale\n if symmetric:\n self.TDMA_SymLU(self.dd, self.ud, self.ld)\n else:\n self.TDMA_LU(self.ld, self.dd, self.ud)\n\n @staticmethod\n @optimizer\n def TDMA_LU(ld, d, ud):\n n = d.shape[0]\n for i in range(2, n):\n ld[i-2] = ld[i-2]/d[i-2]\n d[i] = d[i] - ld[i-2]*ud[i-2]\n\n @staticmethod\n @optimizer\n def TDMA_SymLU(d, ud, ld):\n n = d.shape[0]\n for i in range(2, n):\n ld[i-2] = ud[i-2]/d[i-2]\n d[i] = d[i] - ld[i-2]*ud[i-2]\n\n @staticmethod\n @optimizer\n def TDMA_SymSolve(d, a, l, x, axis=0):\n assert x.ndim == 1, \"Use optimized version for multidimensional solve\"\n n = d.shape[0]\n for i in range(2, n):\n x[i] -= l[i-2]*x[i-2]\n\n x[n-1] = x[n-1]/d[n-1]\n x[n-2] = x[n-2]/d[n-2]\n for i in range(n - 3, -1, -1):\n x[i] = (x[i] - a[i]*x[i+2])/d[i]\n\n def __call__(self, b, u=None, axis=0, **kw):\n \"\"\"Solve matrix problem self u = b\n\n Parameters\n ----------\n b : array\n Array of right hand side on entry and solution on exit unless\n u is provided.\n u : array, optional\n Output array\n axis : int, optional\n The axis over which to solve for if b and u are multidimensional\n\n Note\n ----\n If u is not provided, then b is overwritten with the solution and returned\n\n \"\"\"\n\n if u is None:\n u = b\n else:\n assert u.shape == b.shape\n N = self.dd.shape[0]\n u[:] = b[:]\n\n if not self.dd.shape[0] == self.mat.shape[0]:\n self.init()\n\n if self.neumann:\n T = self.mat.testfunction[0]\n u[T.si[0]] = 0\n if isinstance(self, SpectralMatrix):\n u[T.si[0]] = self.mat.testfunction[0].mean\n\n self.TDMA_SymSolve(self.dd, self.ud, self.ld, u, axis=axis)\n\n if hasattr(u, 'set_boundary_dofs'):\n u.set_boundary_dofs()\n\n if self.neumann:\n u[T.si[0]] = 0\n if isinstance(self.mat, SpectralMatrix):\n u[T.si[0]] = self.mat.testfunction[0].mean\n\n return u\n\nclass PDMA:\n \"\"\"Pentadiagonal matrix solver\n\n Parameters\n ----------\n mat : SparseMatrix\n Pentadiagonal matrix with diagonals in offsets\n -4, -2, 0, 2, 4\n neumann : bool, optional\n Whether matrix represents a Neumann problem, where\n the first index is known as the mean value and we\n solve for slice(1, N-3).\n If `mat` is a :class:`.SpectralMatrix`, then the\n `neumann` keyword is ignored and the information\n extracted from the matrix.\n\n \"\"\"\n\n def __init__(self, mat, neumann=False):\n assert isinstance(mat, SparseMatrix)\n self.mat = mat\n self.N = 0\n self.d0 = np.zeros(0)\n self.d1 = None\n self.d2 = None\n self.l1 = 
None\n self.l2 = None\n self.A = None\n self.L = None\n self.neumann = neumann\n if isinstance(mat, SpectralMatrix):\n self.neumann = mat.testfunction[0].use_fixed_gauge\n\n def init(self):\n \"\"\"Initialize and allocate solver\"\"\"\n B = self.mat\n shape = self.mat.shape[1]\n # Broadcast in case diagonal is simply a constant.\n self.d0 = np.broadcast_to(np.atleast_1d(B[0]), shape).copy()*B.scale\n self.d1 = np.broadcast_to(np.atleast_1d(B[2]), shape-2).copy()*B.scale\n self.d2 = np.broadcast_to(np.atleast_1d(B[4]), shape-4).copy()*B.scale\n if self.neumann:\n self.d0[0] = 1\n self.d1[0] = 0\n self.d2[0] = 0\n if B.issymmetric:\n self.PDMA_SymLU(self.d0, self.d1, self.d2)\n else:\n self.l1 = np.broadcast_to(np.atleast_1d(B[-2]), shape-2).copy()*B.scale\n self.l2 = np.broadcast_to(np.atleast_1d(B[-4]), shape-4).copy()*B.scale\n self.PDMA_LU(self.l2, self.l1, self.d0, self.d1, self.d2)\n\n @staticmethod\n @optimizer\n def PDMA_SymLU(d, e, f): # pragma: no cover\n \"\"\"Symmetric LU decomposition\"\"\"\n n = d.shape[0]\n m = e.shape[0]\n k = n - m\n\n for i in range(n-2*k):\n lam = e[i]/d[i]\n d[i+k] -= lam*e[i]\n e[i+k] -= lam*f[i]\n e[i] = lam\n lam = f[i]/d[i]\n d[i+2*k] -= lam*f[i]\n f[i] = lam\n\n lam = e[n-4]/d[n-4]\n d[n-2] -= lam*e[n-4]\n e[n-4] = lam\n lam = e[n-3]/d[n-3]\n d[n-1] -= lam*e[n-3]\n e[n-3] = lam\n\n @staticmethod\n @optimizer\n def PDMA_LU(a, b, d, e, f): # pragma: no cover\n \"\"\"LU decomposition\"\"\"\n n = d.shape[0]\n m = e.shape[0]\n k = n - m\n\n for i in range(n-2*k):\n lam = b[i]/d[i]\n d[i+k] -= lam*e[i]\n e[i+k] -= lam*f[i]\n b[i] = lam\n lam = a[i]/d[i]\n b[i+k] -= lam*e[i]\n d[i+2*k] -= lam*f[i]\n a[i] = lam\n\n i = n-4\n lam = b[i]/d[i]\n d[i+k] -= lam*e[i]\n b[i] = lam\n i = n-3\n lam = b[i]/d[i]\n d[i+k] -= lam*e[i]\n b[i] = lam\n\n @staticmethod\n @optimizer\n def PDMA_SymSolve(d, e, f, b, axis=0): # pragma: no cover\n \"\"\"Symmetric solve (for testing only)\"\"\"\n n = d.shape[0]\n bc = b\n\n bc[2] -= e[0]*bc[0]\n bc[3] -= e[1]*bc[1]\n for k in range(4, n):\n bc[k] -= (e[k-2]*bc[k-2] + f[k-4]*bc[k-4])\n\n bc[n-1] /= d[n-1]\n bc[n-2] /= d[n-2]\n bc[n-3] /= d[n-3]\n bc[n-3] -= e[n-3]*bc[n-1]\n bc[n-4] /= d[n-4]\n bc[n-4] -= e[n-4]*bc[n-2]\n for k in range(n-5, -1, -1):\n bc[k] /= d[k]\n bc[k] -= (e[k]*bc[k+2] + f[k]*bc[k+4])\n b[:] = bc.astype(float)\n\n @staticmethod\n @optimizer\n def PDMA_Solve(a, b, d, e, f, h, axis=0): # pragma: no cover\n \"\"\"Symmetric solve (for testing only)\"\"\"\n n = d.shape[0]\n bc = h\n\n bc[2] -= b[0]*bc[0]\n bc[3] -= b[1]*bc[1]\n for k in range(4, n):\n bc[k] -= (b[k-2]*bc[k-2] + a[k-4]*bc[k-4])\n\n bc[n-1] /= d[n-1]\n bc[n-2] /= d[n-2]\n bc[n-3] = (bc[n-3]-e[n-3]*bc[n-1])/d[n-3]\n bc[n-4] = (bc[n-4]-e[n-4]*bc[n-2])/d[n-4]\n for k in range(n-5, -1, -1):\n bc[k] = (bc[k]-e[k]*bc[k+2]-f[k]*bc[k+4])/d[k]\n\n def __call__(self, b, u=None, axis=0, **kw):\n \"\"\"Solve matrix problem self u = b\n\n Parameters\n ----------\n b : array\n Array of right hand side on entry and solution on exit unless\n u is provided.\n u : array, optional\n Output array\n axis : int, optional\n The axis over which to solve for if b and u are multidimensional\n\n Note\n ----\n If u is not provided, then b is overwritten with the solution and returned\n \"\"\"\n\n if u is None:\n u = b\n else:\n assert u.shape == b.shape\n u[:] = b[:]\n\n if not self.d0.shape[0] == self.mat[0].shape[0]:\n self.init()\n\n if self.neumann:\n T = self.mat.testfunction[0]\n u[T.si[0]] = 0\n if isinstance(self, SpectralMatrix):\n u[T.si[0]] = T.mean\n\n if 
self.mat.issymmetric:\n self.PDMA_SymSolve(self.d0, self.d1, self.d2, u, axis)\n else:\n self.PDMA_Solve(self.l2, self.l1, self.d0, self.d1, self.d2, u, axis)\n\n if hasattr(u, 'set_boundary_dofs'):\n u.set_boundary_dofs()\n\n return u\n\nclass Solve:\n \"\"\"Generic solver class for SparseMatrix\n\n Possibly with inhomogeneous boundary values\n\n Parameters\n ----------\n A : SparseMatrix\n test : BasisFunction\n\n \"\"\"\n def __init__(self, A, test):\n assert isinstance(A, (SparseMatrix, list))\n self.s = test.slice()\n self.bc_mats = []\n if isinstance(A, list):\n bc_mats = extract_bc_matrices([A])\n self.A = np.sum(np.array(A, dtype=object))\n self.bc_mats = bc_mats\n else:\n self.A = A\n\n self.test = test\n assert self.A.shape[0] == self.A.shape[1]\n\n def __call__(self, b, u=None, axis=0, use_lu=False):\n \"\"\"Solve matrix problem Au = b\n\n Parameters\n ----------\n b : array\n Array of right hand side on entry and solution on exit unless\n u is provided.\n u : array, optional\n Output array\n axis : int, optional\n The axis over which to solve for if b and u are multidimensional\n use_lu : bool, optional\n whether to use lu-decomposition stored previously as self._lu\n\n Note\n ----\n If u is not provided, then b is overwritten with the solution and returned\n\n \"\"\"\n if u is None:\n u = b\n else:\n assert u.shape == b.shape\n\n # Boundary conditions\n if len(self.bc_mats) > 0:\n u.set_boundary_dofs()\n w0 = np.zeros_like(u)\n for bc_mat in self.bc_mats:\n b -= bc_mat.matvec(u, w0)\n\n # Move axis to first\n if axis > 0:\n u = np.moveaxis(u, axis, 0)\n if u is not b:\n b = np.moveaxis(b, axis, 0)\n\n s = self.s\n assert self.A.shape[0] == b[s].shape[0]\n\n if self.test.use_fixed_gauge:\n A = self.A.diags('lil')\n _, zerorow = A[0].nonzero()\n A[(0, zerorow)] = 0\n A[0, 0] = 1\n b[0] = self.test.mean\n A = A.tocsc()\n else:\n A = self.A.diags('csc')\n\n if b.ndim == 1:\n if use_lu:\n if b.dtype.char in 'fdg' or self._lu.U.dtype.char in 'FDG':\n u[s] = self._lu.solve(b[s])\n else: # complex b and real matrix\n u.real[s] = self._lu.solve(b[s].real)\n u.imag[s] = self._lu.solve(b[s].imag)\n\n else:\n u[s] = spsolve(A, b[s])\n\n else:\n N = b[s].shape[0]\n P = np.prod(b[s].shape[1:])\n br = b[s].reshape((N, P))\n if use_lu:\n if b.dtype.char in 'fdg' or self._lu.U.dtype.char in 'FDG':\n u[s] = self._lu.solve(br).reshape(u[s].shape)\n else: # complex b and real matrix\n u.real[s] = self._lu.solve(br.real).reshape(u[s].shape)\n u.imag[s] = self._lu.solve(br.imag).reshape(u[s].shape)\n\n else:\n u[s] = spsolve(A, br).reshape(u[s].shape)\n\n if axis > 0:\n u = np.moveaxis(u, 0, axis)\n if u is not b:\n b = np.moveaxis(b, 0, axis)\n return u\n\n\nclass SolverGeneric2ND:\n \"\"\"Generic solver for problems consisting of tensorproduct matrices\n containing two non-diagonal submatrices.\n\n Parameters\n ----------\n mats : sequence\n sequence of instances of :class:`.TPMatrix`\n\n Note\n ----\n In addition to two non-diagonal matrices, the solver can also handle one\n additional diagonal matrix (one Fourier matrix).\n \"\"\"\n\n def __init__(self, tpmats):\n self.tpmats = tpmats\n self.T = tpmats[0].space\n self.M = None\n\n def matvec(self, u, c):\n c.fill(0)\n if u.ndim == 2:\n s0 = tuple(base.slice() for base in self.T)\n c[s0] = self.M.dot(u[s0].flatten()).reshape(self.T.dims())\n return c\n\n def get_diagonal_axis(self):\n naxes = self.T.get_nondiagonal_axes()\n diagonal_axis = np.setxor1d([0, 1, 2], naxes)\n assert len(diagonal_axis) == 1\n return diagonal_axis[0]\n\n def 
diags(self, i):\n \"\"\"Return matrix for given index `i` in diagonal direction\"\"\"\n if self.T.dimensions == 2:\n # In 2D there's just 1 matrix, store and reuse\n if self.M is not None:\n return self.M\n m = self.tpmats[0]\n A0 = m.mats[0].diags('csc')\n A1 = m.mats[1].diags('csc')\n M0 = scp.kron(A0, A1, 'csc')\n M0 *= np.atleast_1d(m.scale).item()\n for m in self.tpmats[1:]:\n A0 = m.mats[0].diags('csc')\n A1 = m.mats[1].diags('csc')\n M1 = scp.kron(A0, A1, 'csc')\n M1 *= np.atleast_1d(m.scale).item()\n M0 = M0 + M1\n # Check if we need to fix gauge. This is required if we are solving\n # a pure Neumann Poisson problem.\n z0 = M0[0].nonzero()\n if z0[1][0] > 0 :\n M0 = M0.tolil()\n zerorow = M0[0].nonzero()[1]\n M0[(0, zerorow)] = 0\n M0[0, 0] = 1\n M0 = M0.tocsc()\n self.M = M0\n return self.M\n\n else:\n # 1 matrix per Fourier coefficient\n naxes = self.T.get_nondiagonal_axes()\n m = self.tpmats[0]\n diagonal_axis = self.get_diagonal_axis()\n sc = [0, 0, 0]\n sc[diagonal_axis] = i if m.scale.shape[diagonal_axis] > 1 else 0\n A0 = m.mats[naxes[0]].diags('csc')\n A1 = m.mats[naxes[1]].diags('csc')\n M0 = scp.kron(A0, A1, 'csc')\n M0 *= m.scale[tuple(sc)]\n for m in self.tpmats[1:]:\n A0 = m.mats[naxes[0]].diags('csc')\n A1 = m.mats[naxes[1]].diags('csc')\n M1 = scp.kron(A0, A1, 'csc')\n sc[diagonal_axis] = i if m.scale.shape[diagonal_axis] > 1 else 0\n M1 *= m.scale[tuple(sc)]\n M0 = M0 + M1\n z0 = M0[0].nonzero()\n if z0[1][0] > 0 :\n M0 = M0.tolil()\n zerorow = M0[0].nonzero()[1]\n M0[(0, zerorow)] = 0\n M0[0, 0] = 1\n M0 = M0.tocsc()\n return M0\n\n def __call__(self, b, u=None):\n if u is None:\n u = b\n else:\n assert u.shape == b.shape\n if u.ndim == 2:\n s0 = self.T.slice()\n M = self.diags(0)\n u[s0] = scp.linalg.spsolve(M, b[s0].flatten()).reshape(self.T.dims())\n\n elif u.ndim == 3:\n naxes = self.T.get_nondiagonal_axes()\n diagonal_axis = self.get_diagonal_axis()\n s0 = list(self.T.slice())\n for i in range(self.T.shape(True)[diagonal_axis]):\n M0 = self.diags(i)\n s0[diagonal_axis] = i\n shape = np.take(self.T.dims(), naxes)\n u[tuple(s0)] = scp.linalg.spsolve(M0, b[tuple(s0)].flatten()).reshape(shape)\n return u\n\nclass Solver2D:\n \"\"\"Generic solver for tensorproductspaces in 2D\n\n Parameters\n ----------\n mats : sequence\n sequence of instances of :class:`.TPMatrix`\n\n Note\n ----\n If there are boundary matrices in the list of mats, then\n these matrices are used to modify the right hand side before\n solving. If this is not the desired behaviour, then use\n :func:`.extract_bc_matrices` on mats before using this class.\n\n \"\"\"\n\n def __init__(self, tpmats, fixed_gauge=None):\n bc_mats = extract_bc_matrices([tpmats])\n self.tpmats = tpmats\n self.bc_mats = bc_mats\n self.fixed_gauge = fixed_gauge\n self._lu = None\n m = tpmats[0]\n self.T = T = m.space\n ndim = T.dimensions\n assert ndim == 2\n assert np.atleast_1d(m.scale).size == 1, \"Use level = 2 with :func:`.inner`\"\n\n A0 = m.mats[0].diags('csc')\n A1 = m.mats[1].diags('csc')\n M0 = scp.kron(A0, A1, 'csc')\n M0 *= np.atleast_1d(m.scale).item()\n for m in tpmats[1:]:\n A0 = m.mats[0].diags('csc')\n A1 = m.mats[1].diags('csc')\n M1 = scp.kron(A0, A1, 'csc')\n assert np.atleast_1d(m.scale).size == 1, \"Use level = 2 with :func:`.inner`\"\n M1 *= np.atleast_1d(m.scale).item()\n M0 = M0 + M1\n\n # Check if we need to fix gauge. 
This is required if we are solving\n # a pure Neumann Poisson problem.\n if fixed_gauge is not None:\n # Ident first row\n z0 = M0[0].nonzero()\n if len(z0[0]) == 0 or z0[1][0] > 0:\n M0 = M0.tolil()\n zerorow = M0[0].nonzero()[1]\n M0[(0, zerorow)] = 0\n M0[0, 0] = 1\n M0 = M0.tocsc()\n\n self.M = M0\n\n def matvec(self, u, c):\n c.fill(0)\n s0 = tuple(base.slice() for base in self.T)\n c[s0] = self.M.dot(u[s0].flatten()).reshape(self.T.dims())\n return c\n\n def __call__(self, b, u=None):\n\n if u is None:\n u = b\n else:\n assert u.shape == b.shape\n\n if len(self.bc_mats) > 0:\n u.set_boundary_dofs()\n w0 = np.zeros_like(u)\n for bc_mat in self.bc_mats:\n b -= bc_mat.matvec(u, w0)\n\n s0 = tuple(base.slice() for base in self.T)\n assert b.dtype.char == u.dtype.char\n if self._lu is None:\n self._lu = scp.linalg.splu(self.M)\n if b.dtype.char in 'fdg':\n u[s0] = self._lu.solve(b[s0].flatten()).reshape(self.T.dims())\n else:\n if self.M.dtype.char in 'FDG':\n u[s0] = self._lu.solve(b[s0].flatten()).reshape(self.T.dims())\n\n else:\n u.imag[s0] = self._lu.solve(b.imag[s0].flatten()).reshape(self.T.dims())\n u.real[s0] = self._lu.solve(b.real[s0].flatten()).reshape(self.T.dims())\n\n if self.fixed_gauge:\n u[0, 0] = self.fixed_gauge\n\n return u\n\nclass Solver3D:\n \"\"\"Generic solver for tensorproductspaces in 3D\n\n Parameters\n ----------\n mats : sequence\n sequence of instances of :class:`.TPMatrix`\n\n Note\n ----\n If there are boundary matrices in the list of mats, then\n these matrices are used to modify the right hand side before\n solving. If this is not the desired behaviour, then use\n :func:`.extract_bc_matrices` on mats before using this class.\n\n \"\"\"\n\n def __init__(self, tpmats, fixed_gauge=None):\n bc_mats = extract_bc_matrices([tpmats])\n self.tpmats = tpmats\n self.bc_mats = bc_mats\n self._lu = None\n m = tpmats[0]\n self.T = T = m.space # test space\n ndim = T.dimensions\n assert ndim == 3\n assert np.atleast_1d(m.scale).size == 1, \"Use level = 2 with :func:`.inner`\"\n\n A0 = m.mats[0].diags('csc')\n A1 = m.mats[1].diags('csc')\n A2 = m.mats[2].diags('csc')\n M0 = scp.kron(scp.kron(A0, A1, 'csc'), A2, 'csc')\n M0 *= np.atleast_1d(m.scale).item()\n for m in tpmats[1:]:\n A0 = m.mats[0].diags('csc')\n A1 = m.mats[1].diags('csc')\n A2 = m.mats[2].diags('csc')\n M1 = scp.kron(scp.kron(A0, A1, 'csc'), A2, 'csc')\n assert np.atleast_1d(m.scale).size == 1, \"Use level = 2 with :func:`.inner`\"\n M1 *= np.atleast_1d(m.scale).item()\n M0 = M0 + M1\n\n # Check if we need to fix gauge. 
This is required if we are solving\n # a pure Neumann Poisson problem.\n if fixed_gauge:\n # Ident first row\n z0 = M0[0].nonzero()\n if len(z0[0]) == 0 or z0[1][0] > 0:\n M0 = M0.tolil()\n zerorow = M0[0].nonzero()[1]\n M0[(0, zerorow)] = 0\n M0[0, 0] = 1\n M0 = M0.tocsc()\n self.M = M0\n\n def matvec(self, u, c):\n c.fill(0)\n s0 = tuple(base.slice() for base in self.T)\n c[s0] = self.M.dot(u[s0].flatten()).reshape(self.T.dims())\n return c\n\n def __call__(self, b, u=None):\n\n if u is None:\n u = b\n else:\n assert u.shape == b.shape\n\n if len(self.bc_mats) > 0:\n u.set_boundary_dofs()\n w0 = np.zeros_like(u)\n for bc_mat in self.bc_mats:\n b -= bc_mat.matvec(u, w0)\n\n s0 = tuple(base.slice() for base in self.T)\n assert b.dtype.char == u.dtype.char\n if self._lu is None:\n self._lu = scp.linalg.splu(self.M)\n if b.dtype.char in 'fdg':\n u[s0] = self._lu.solve(b[s0].flatten()).reshape(self.T.dims())\n else:\n if self.M.dtype.char in 'FDG':\n u[s0] = self._lu.solve(b[s0].flatten()).reshape(self.T.dims())\n\n else:\n u.imag[s0] = self._lu.solve(b.imag[s0].flatten()).reshape(self.T.dims())\n u.real[s0] = self._lu.solve(b.real[s0].flatten()).reshape(self.T.dims())\n\n if self.fixed_gauge:\n u[0, 0, 0] = self.fixed_gauge\n\n return u\n\nclass TDMA_O:\n \"\"\"Tridiagonal matrix solver\n\n Parameters\n ----------\n mat : SparseMatrix\n Symmetric tridiagonal matrix with diagonals in offsets -1, 0, 1\n\n \"\"\"\n # pylint: disable=too-few-public-methods\n\n def __init__(self, mat):\n assert isinstance(mat, SparseMatrix)\n self.mat = mat\n self.N = 0\n self.dd = np.zeros(0)\n self.ud = None\n self.L = None\n\n def init(self):\n \"\"\"Initialize and allocate solver\"\"\"\n M = self.mat.shape[0]\n B = self.mat\n self.dd = B[0]*np.ones(M)\n self.ud = B[1]*np.ones(M-1)\n self.L = np.zeros(M-1)\n self.TDMA_O_SymLU(self.dd, self.ud, self.L)\n\n @staticmethod\n @optimizer\n def TDMA_O_SymLU(d, ud, ld):\n n = d.shape[0]\n for i in range(1, n):\n ld[i-1] = ud[i-1]/d[i-1]\n d[i] = d[i] - ld[i-1]*ud[i-1]\n\n @staticmethod\n @optimizer\n def TDMA_O_SymSolve(d, a, l, x, axis=0):\n assert x.ndim == 1, \"Use optimized version for multidimensional solve\"\n n = d.shape[0]\n for i in range(1, n):\n x[i] -= l[i-1]*x[i-1]\n\n x[n-1] = x[n-1]/d[n-1]\n for i in range(n-2, -1, -1):\n x[i] = (x[i] - a[i]*x[i+1])/d[i]\n\n def __call__(self, b, u=None, axis=0):\n \"\"\"Solve matrix problem self u = b\n\n Parameters\n ----------\n b : array\n Array of right hand side on entry and solution on exit unless\n u is provided.\n u : array, optional\n Output array\n axis : int, optional\n The axis over which to solve for if b and u are multidimensional\n\n Note\n ----\n If u is not provided, then b is overwritten with the solution and returned\n\n \"\"\"\n\n if u is None:\n u = b\n else:\n assert u.shape == b.shape\n u[:] = b[:]\n\n if not self.dd.shape[0] == self.mat.shape[0]:\n self.init()\n\n self.TDMA_O_SymSolve(self.dd, self.ud, self.L, u, axis=axis)\n\n u /= self.mat.scale\n return u\n\n\nclass SolverGeneric1ND:\n \"\"\"Generic solver for tensorproduct matrices consisting of\n non-diagonal matrices along only one axis.\n\n Parameters\n ----------\n mats : sequence\n sequence of instances of :class:`.TPMatrix`\n\n Note\n ----\n In addition to the one non-diagonal direction, the solver can also handle\n up to two diagonal (Fourier) directions.\n Also note that if there are boundary matrices in the list of mats, then\n these matrices are used to modify the right hand side before\n solving. 
If this is not the desired behaviour, then use\n :func:`.extract_bc_matrices` on mats before using this class.\n\n \"\"\"\n\n def __init__(self, mats):\n assert isinstance(mats, list)\n naxes = set()\n for tpmat in mats:\n if not tpmat._issimplified:\n tpmat.simplify_diagonal_matrices()\n naxes.update(tpmat.naxes)\n assert len(naxes) == 1\n self.naxes = naxes.pop()\n bc_mats = extract_bc_matrices([mats])\n self.mats = mats\n self.bc_mats = bc_mats\n # For time-dependent solver, store all generated matrices and reuse\n # This takes a lot of memory, so for now it's only implemented for 2D\n self.MM = None\n\n @staticmethod\n def apply_constraint(A, b, i, constraint):\n assert isinstance(constraint, tuple)\n assert len(constraint) == 4\n\n if constraint is None:\n return A, b\n\n if not i == constraint[0]:\n return A, b\n\n row = constraint[1]\n col = constraint[2]\n val = constraint[3]\n b[row] = val\n r = A.getrow(row).nonzero()\n A[(row, r[1])] = 0\n A[row, col] = 1\n return A, b\n\n def matvec(self, u, c):\n c.fill(0)\n w0 = np.zeros_like(u)\n for mat in self.mats:\n c += mat.matvec(u, w0)\n\n if len(self.bc_mats) > 0:\n u.set_boundary_dofs()\n for bc_mat in self.bc_mats:\n c += bc_mat.matvec(u, w0)\n return c\n\n def __call__(self, b, u=None, constraints=()):\n \"\"\"Solve problem with one non-diagonal direction\n\n Parameters\n ----------\n b : array, right hand side\n u : array, solution\n constraints : tuple of 4-tuples\n Each 4-tuple is a constraint, with each item representing\n - 0 : The diagonal index, or indices for 3D\n - 1 : row\n - 2 : column\n - 3 : value\n Matrix row is zeroed and then indented by setting A[row, col] = 0\n\n \"\"\"\n if u is None:\n u = b\n else:\n assert u.shape == b.shape\n m = self.mats[0]\n\n if len(self.bc_mats) > 0:\n u.set_boundary_dofs()\n w0 = np.zeros_like(u)\n for bc_mat in self.bc_mats:\n b -= bc_mat.matvec(u, w0)\n\n if u.ndim == 2:\n\n if self.naxes == 0:\n # non-diagonal in axis=0\n\n if self.MM is None:\n self.MM = []\n for i in range(b.shape[1]):\n MM = None\n for mat in self.mats:\n sc = mat.scale[0, i] if mat.scale.shape[1] > 1 else mat.scale[0, 0]\n if MM:\n MM += mat.mats[0]*sc\n else:\n MM = mat.mats[0]*sc\n sl = m.space.bases[0].slice()\n #u[sl, i] = MM.solve(b[sl, i], u[sl, i])\n Mc = MM.diags('csc')\n for constraint in constraints:\n Mc, b = self.apply_constraint(Mc, b, i, constraint)\n try:\n MM._lu = splu(Mc)\n u[sl, i] = MM.solve(b[sl, i], u[sl, i], use_lu=True)\n except:\n print('Singular matrix for j=', i)\n u[sl, i] = 0\n self.MM.append(MM)\n\n else:\n for i in range(b.shape[1]):\n sl = m.space.bases[0].slice()\n u[sl, i] = self.MM[i].solve(b[sl, i], u[sl, i], use_lu=True)\n\n else:\n if self.MM is None:\n # non-diagonal in axis=1\n self.MM = []\n for i in range(b.shape[0]):\n MM = None\n for mat in self.mats:\n sc = mat.scale[i, 0] if mat.scale.shape[0] > 1 else mat.scale[0, 0]\n if MM:\n MM += mat.mats[1]*sc\n else:\n MM = mat.mats[1]*sc\n sl = m.space.bases[1].slice()\n Mc = MM.diags('csc')\n for constraint in constraints:\n Mc, b = self.apply_constraint(Mc, b, i, constraint)\n MM._lu = splu(Mc)\n MM.solve(b[i, sl], u[i, sl], use_lu=True)\n self.MM.append(MM)\n\n else:\n for i in range(b.shape[0]):\n sl = m.space.bases[1].slice()\n u[i, sl] = self.MM[i].solve(b[i, sl], u[i, sl], use_lu=True)\n\n elif u.ndim == 3:\n if self.naxes == 0:\n # non-diagonal in axis=0\n for i in range(b.shape[1]):\n for j in range(b.shape[2]):\n MM = None\n for mat in self.mats:\n sc = np.broadcast_to(mat.scale, u.shape)[0, i, j]\n if MM:\n MM += 
mat.mats[0]*sc\n else:\n MM = mat.mats[0]*sc\n Mc = MM.diags('csc')\n for constraint in constraints:\n Mc, b = self.apply_constraint(Mc, b, (i, j), constraint)\n MM._lu = splu(Mc)\n sl = mat.space.bases[0].slice()\n u[sl, i, j] = MM.solve(b[sl, i, j], u[sl, i, j], use_lu=True)\n\n elif self.naxes == 1:\n # non-diagonal in axis=1\n for i in range(b.shape[0]):\n for j in range(b.shape[2]):\n MM = None\n for mat in self.mats:\n sc = np.broadcast_to(mat.scale, u.shape)[i, 0, j]\n if MM:\n MM += mat.mats[1]*sc\n else:\n MM = mat.mats[1]*sc\n Mc = MM.diags('csc')\n for constraint in constraints:\n Mc, b = self.apply_constraint(Mc, b, (i, j), constraint)\n MM._lu = splu(Mc)\n sl = mat.space.bases[1].slice()\n u[i, sl, j] = MM.solve(b[i, sl, j], u[i, sl, j], use_lu=True)\n\n elif self.naxes == 2:\n # non-diagonal in axis=2\n for i in range(b.shape[0]):\n for j in range(b.shape[1]):\n MM = None\n for mat in self.mats:\n sc = np.broadcast_to(mat.scale, u.shape)[i, j, 0]\n if MM:\n MM += mat.mats[2]*sc\n else:\n MM = mat.mats[2]*sc\n Mc = MM.diags('csc')\n for constraint in constraints:\n Mc, b = self.apply_constraint(Mc, b, (i, j), constraint)\n MM._lu = splu(Mc)\n sl = mat.space.bases[2].slice()\n u[i, j, sl] = MM.solve(b[i, j, sl], u[i, j, sl], use_lu=True)\n\n return u\n" ]
[ [ "scipy.sparse.linalg.splu", "scipy.sparse.linalg.spsolve", "numpy.zeros_like", "numpy.setxor1d", "numpy.array", "numpy.zeros", "numpy.ones", "numpy.prod", "numpy.atleast_1d", "numpy.moveaxis", "numpy.broadcast_to", "scipy.sparse.kron" ] ]
exrich/PyTables
[ "72ac99fb1029c382530d6f70ae70e1c22265cdec" ]
[ "tables/utils.py" ]
[ "\"\"\"Utility functions.\"\"\"\n\nimport math\nimport os\nimport sys\nimport warnings\nimport weakref\nfrom pathlib import Path\nfrom time import perf_counter as clock\n\nimport numpy as np\n\nfrom .flavor import array_of_flavor\n\n# The map between byteorders in NumPy and PyTables\nbyteorders = {\n '>': 'big',\n '<': 'little',\n '=': sys.byteorder,\n '|': 'irrelevant',\n}\n\n# The type used for size values: indexes, coordinates, dimension\n# lengths, row numbers, shapes, chunk shapes, byte counts...\nSizeType = np.int64\n\n\ndef correct_byteorder(ptype, byteorder):\n \"\"\"Fix the byteorder depending on the PyTables types.\"\"\"\n\n if ptype in ['string', 'bool', 'int8', 'uint8', 'object']:\n return \"irrelevant\"\n else:\n return byteorder\n\n\ndef is_idx(index):\n \"\"\"Checks if an object can work as an index or not.\"\"\"\n\n if type(index) is int:\n return True\n elif hasattr(index, \"__index__\"):\n # Exclude the array([idx]) as working as an index. Fixes #303.\n if (hasattr(index, \"shape\") and index.shape != ()):\n return False\n try:\n index.__index__()\n if isinstance(index, bool):\n warnings.warn(\n 'using a boolean instead of an integer will result in an '\n 'error in the future', DeprecationWarning, stacklevel=2)\n return True\n except TypeError:\n return False\n elif isinstance(index, np.integer):\n return True\n # For Python 2.4 one should test 0-dim and 1-dim, 1-elem arrays as well\n elif (isinstance(index, np.ndarray) and (index.shape == ()) and\n index.dtype.str[1] == 'i'):\n return True\n\n return False\n\n\ndef idx2long(index):\n \"\"\"Convert a possible index into a long int.\"\"\"\n\n try:\n return int(index)\n except Exception:\n raise TypeError(\"not an integer type.\")\n\n\n# This is used in VLArray and EArray to produce NumPy object compliant\n# with atom from a generic python type. If copy is stated as True, it\n# is assured that it will return a copy of the object and never the same\n# object or a new one sharing the same memory.\ndef convert_to_np_atom(arr, atom, copy=False):\n \"\"\"Convert a generic object into a NumPy object compliant with atom.\"\"\"\n\n # First, convert the object into a NumPy array\n nparr = array_of_flavor(arr, 'numpy')\n # Copy of data if necessary for getting a contiguous buffer, or if\n # dtype is not the correct one.\n if atom.shape == ():\n # Scalar atom case\n nparr = np.array(nparr, dtype=atom.dtype, copy=copy)\n else:\n # Multidimensional atom case. Addresses #133.\n # We need to use this strange way to obtain a dtype compliant\n # array because NumPy doesn't honor the shape of the dtype when\n # it is multidimensional. See:\n # http://scipy.org/scipy/numpy/ticket/926\n # for details.\n # All of this is done just to taking advantage of the NumPy\n # broadcasting rules.\n newshape = nparr.shape[:-len(atom.dtype.shape)]\n nparr2 = np.empty(newshape, dtype=[('', atom.dtype)])\n nparr2['f0'][:] = nparr\n # Return a view (i.e. 
get rid of the record type)\n nparr = nparr2.view(atom.dtype)\n return nparr\n\n\n# The next is used in Array, EArray and VLArray, and it is a bit more\n# high level than convert_to_np_atom\ndef convert_to_np_atom2(object, atom):\n \"\"\"Convert a generic object into a NumPy object compliant with atom.\"\"\"\n\n # Check whether the object needs to be copied to make the operation\n # safe to in-place conversion.\n copy = atom.type in ['time64']\n nparr = convert_to_np_atom(object, atom, copy)\n # Finally, check the byteorder and change it if needed\n byteorder = byteorders[nparr.dtype.byteorder]\n if (byteorder in ['little', 'big'] and byteorder != sys.byteorder):\n # The byteorder needs to be fixed (a copy is made\n # so that the original array is not modified)\n nparr = nparr.byteswap()\n\n return nparr\n\n\ndef check_file_access(filename, mode='r'):\n \"\"\"Check for file access in the specified `mode`.\n\n `mode` is one of the modes supported by `File` objects. If the file\n indicated by `filename` can be accessed using that `mode`, the\n function ends successfully. Else, an ``IOError`` is raised\n explaining the reason of the failure.\n\n All this paraphernalia is used to avoid the lengthy and scaring HDF5\n messages produced when there are problems opening a file. No\n changes are ever made to the file system.\n\n \"\"\"\n\n path = Path(filename).resolve()\n\n if mode == 'r':\n # The file should be readable.\n if not os.access(path, os.F_OK):\n raise OSError(f\"``{path}`` does not exist\")\n if not path.is_file():\n raise OSError(f\"``{path}`` is not a regular file\")\n if not os.access(path, os.R_OK):\n raise OSError(f\"file ``{path}`` exists but it can not be read\")\n elif mode == 'w':\n if os.access(path, os.F_OK):\n # Since the file is not removed but replaced,\n # it must already be accessible to read and write operations.\n check_file_access(path, 'r+')\n else:\n # A new file is going to be created,\n # so the directory should be writable.\n if not os.access(path.parent, os.F_OK):\n raise OSError(f\"``{path.parent}`` does not exist\")\n if not path.parent.is_dir():\n raise OSError(f\"``{path.parent}`` is not a directory\")\n if not os.access(path.parent, os.W_OK):\n raise OSError(\n f\"directory ``{path.parent}`` exists but it can not be \"\n f\"written\"\n )\n elif mode == 'a':\n if os.access(path, os.F_OK):\n check_file_access(path, 'r+')\n else:\n check_file_access(path, 'w')\n elif mode == 'r+':\n check_file_access(path, 'r')\n if not os.access(path, os.W_OK):\n raise OSError(f\"file ``{path}`` exists but it can not be written\")\n else:\n raise ValueError(f\"invalid mode: {mode!r}\")\n\n\ndef lazyattr(fget):\n \"\"\"Create a *lazy attribute* from the result of `fget`.\n\n This function is intended to be used as a *method decorator*. It\n returns a *property* which caches the result of calling the `fget`\n instance method. The docstring of `fget` is used for the property\n itself. For instance:\n\n >>> class MyClass(object):\n ... @lazyattr\n ... def attribute(self):\n ... 'Attribute description.'\n ... print('creating value')\n ... return 10\n ...\n >>> type(MyClass.attribute)\n <class 'property'>\n >>> MyClass.attribute.__doc__\n 'Attribute description.'\n >>> obj = MyClass()\n >>> obj.__dict__\n {}\n >>> obj.attribute\n creating value\n 10\n >>> obj.__dict__\n {'attribute': 10}\n >>> obj.attribute\n 10\n >>> del obj.attribute\n Traceback (most recent call last):\n ...\n AttributeError: can't delete attribute\n\n .. 
warning::\n\n Please note that this decorator *changes the type of the\n decorated object* from an instance method into a property.\n\n \"\"\"\n\n name = fget.__name__\n\n def newfget(self):\n mydict = self.__dict__\n if name in mydict:\n return mydict[name]\n mydict[name] = value = fget(self)\n return value\n\n return property(newfget, None, None, fget.__doc__)\n\n\ndef show_stats(explain, tref, encoding=None):\n \"\"\"Show the used memory (only works for Linux 2.6.x).\"\"\"\n\n for line in Path('/proc/self/status').read_text().splitlines():\n if line.startswith(\"VmSize:\"):\n vmsize = int(line.split()[1])\n elif line.startswith(\"VmRSS:\"):\n vmrss = int(line.split()[1])\n elif line.startswith(\"VmData:\"):\n vmdata = int(line.split()[1])\n elif line.startswith(\"VmStk:\"):\n vmstk = int(line.split()[1])\n elif line.startswith(\"VmExe:\"):\n vmexe = int(line.split()[1])\n elif line.startswith(\"VmLib:\"):\n vmlib = int(line.split()[1])\n print(\"Memory usage: ******* %s *******\" % explain)\n print(f\"VmSize: {vmsize:>7} kB\\tVmRSS: {vmrss:>7} kB\")\n print(f\"VmData: {vmdata:>7} kB\\tVmStk: {vmstk:>7} kB\")\n print(f\"VmExe: {vmexe:>7} kB\\tVmLib: {vmlib:>7} kB\")\n tnow = clock()\n print(f\"WallClock time: {tnow - tref:.3f}\")\n return tnow\n\n\n# truncate data before calling __setitem__, to improve compression ratio\n# this function is taken verbatim from netcdf4-python\ndef quantize(data, least_significant_digit):\n \"\"\"quantize data to improve compression.\n\n Data is quantized using around(scale*data)/scale, where scale is\n 2**bits, and bits is determined from the least_significant_digit.\n\n For example, if least_significant_digit=1, bits will be 4.\n\n \"\"\"\n\n exp = -least_significant_digit\n exp = math.floor(exp) if exp < 0 else math.ceil(exp)\n bits = math.ceil(math.log2(10 ** -exp))\n scale = 2 ** bits\n datout = np.around(scale * data) / scale\n\n return datout\n\n\n# Utilities to detect leaked instances. 
See recipe 14.10 of the Python\n# Cookbook by Martelli & Ascher.\ntracked_classes = {}\n\n\ndef log_instance_creation(instance, name=None):\n if name is None:\n name = instance.__class__.__name__\n if name not in tracked_classes:\n tracked_classes[name] = []\n tracked_classes[name].append(weakref.ref(instance))\n\n\ndef string_to_classes(s):\n if s == '*':\n c = sorted(tracked_classes)\n return c\n else:\n return s.split()\n\n\ndef fetch_logged_instances(classes=\"*\"):\n classnames = string_to_classes(classes)\n return [(cn, len(tracked_classes[cn])) for cn in classnames]\n\n\ndef count_logged_instances(classes, file=sys.stdout):\n for classname in string_to_classes(classes):\n file.write(\"%s: %d\\n\" % (classname, len(tracked_classes[classname])))\n\n\ndef list_logged_instances(classes, file=sys.stdout):\n for classname in string_to_classes(classes):\n file.write('\\n%s:\\n' % classname)\n for ref in tracked_classes[classname]:\n obj = ref()\n if obj is not None:\n file.write(' %s\\n' % repr(obj))\n\n\ndef dump_logged_instances(classes, file=sys.stdout):\n for classname in string_to_classes(classes):\n file.write('\\n%s:\\n' % classname)\n for ref in tracked_classes[classname]:\n obj = ref()\n if obj is not None:\n file.write(' %s:\\n' % obj)\n for key, value in obj.__dict__.items():\n file.write(f' {key:>20} : {value}\\n')\n\n\n#\n# A class useful for cache usage\n#\nclass CacheDict(dict):\n \"\"\"A dictionary that prevents itself from growing too much.\"\"\"\n\n def __init__(self, maxentries):\n self.maxentries = maxentries\n super().__init__(self)\n\n def __setitem__(self, key, value):\n # Protection against growing the cache too much\n if len(self) > self.maxentries:\n # Remove a 10% of (arbitrary) elements from the cache\n entries_to_remove = self.maxentries / 10\n for k in list(self)[:entries_to_remove]:\n super().__delitem__(k)\n super().__setitem__(key, value)\n\n\nclass NailedDict:\n \"\"\"A dictionary which ignores its items when it has nails on it.\"\"\"\n\n def __init__(self, maxentries):\n self.maxentries = maxentries\n self._cache = {}\n self._nailcount = 0\n\n # Only a restricted set of dictionary methods are supported. 
That\n # is why we buy instead of inherit.\n\n # The following are intended to be used by ``Table`` code changing\n # the set of usable indexes.\n\n def clear(self):\n self._cache.clear()\n\n def nail(self):\n self._nailcount += 1\n\n def unnail(self):\n self._nailcount -= 1\n\n # The following are intended to be used by ``Table`` code handling\n # conditions.\n\n def __contains__(self, key):\n if self._nailcount > 0:\n return False\n return key in self._cache\n\n def __getitem__(self, key):\n if self._nailcount > 0:\n raise KeyError(key)\n return self._cache[key]\n\n def get(self, key, default=None):\n if self._nailcount > 0:\n return default\n return self._cache.get(key, default)\n\n def __setitem__(self, key, value):\n if self._nailcount > 0:\n return\n cache = self._cache\n # Protection against growing the cache too much\n if len(cache) > self.maxentries:\n # Remove a 10% of (arbitrary) elements from the cache\n entries_to_remove = max(self.maxentries // 10, 1)\n for k in list(cache)[:entries_to_remove]:\n del cache[k]\n cache[key] = value\n\n\ndef detect_number_of_cores():\n \"\"\"Detects the number of cores on a system.\n\n Cribbed from pp.\n\n \"\"\"\n\n # Linux, Unix and MacOS:\n if hasattr(os, \"sysconf\"):\n if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names:\n # Linux & Unix:\n ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n if isinstance(ncpus, int) and ncpus > 0:\n return ncpus\n else: # OSX:\n return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n # Windows:\n if \"NUMBER_OF_PROCESSORS\" in os.environ:\n ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n if ncpus > 0:\n return ncpus\n return 1 # Default\n\n\ndef _test():\n \"\"\"Run ``doctest`` on this module.\"\"\"\n\n import doctest\n doctest.testmod()\n\n\nif __name__ == '__main__':\n _test()\n" ]
[ [ "numpy.around", "numpy.array", "numpy.empty" ] ]
wangzhen263/RelationPrediction
[ "bd3ce0a071032b65f0e5df391ab4fb51f8bb7502" ]
[ "code/train.py" ]
[ "import argparse\nimport random\n\nimport tensorflow as tf\nfrom optimization.optimize import build_tensorflow\nfrom common import settings_reader, io, model_builder, optimizer_parameter_parser, evaluation, auxilliaries\nfrom model import Model\nimport numpy as np\n\nparser = argparse.ArgumentParser(description=\"Train a model on a given dataset.\")\nparser.add_argument(\"--settings\", help=\"Filepath for settings file.\", required=True)\nparser.add_argument(\"--dataset\", help=\"Filepath for dataset.\", required=True)\nargs = parser.parse_args()\n\nsettings = settings_reader.read(args.settings)\nprint(settings)\n\n\n\n'''\nLoad datasets:\n'''\n\ndataset = args.dataset\n\nrelations_path = dataset + '/relations.dict'\nentities_path = dataset + '/entities.dict'\ntrain_path = dataset + '/train.txt'\nvalid_path = dataset + '/valid.txt'\ntest_path = dataset + '/test.txt'\n\n#Extend paths for accuracy evaluation:\nif settings['Evaluation']['Metric'] == 'Accuracy':\n valid_path = dataset + '/valid_accuracy.txt'\n test_path = dataset + '/test_accuracy.txt'\n\ntrain_triplets = io.read_triplets_as_list(train_path, entities_path, relations_path)\n\nvalid_triplets = io.read_triplets_as_list(valid_path, entities_path, relations_path)\ntest_triplets = io.read_triplets_as_list(test_path, entities_path, relations_path)\n\n\ntrain_triplets = np.array(train_triplets)\nvalid_triplets = np.array(valid_triplets)\ntest_triplets = np.array(test_triplets)\n\nentities = io.read_dictionary(entities_path)\nrelations = io.read_dictionary(relations_path)\n\n'''\nshuffled_rels = np.arange(len(relations))\nnp.random.shuffle(shuffled_rels)\n\nknown_rels = shuffled_rels[:int(len(relations)/2)]\ntarget_rels = shuffled_rels[int(len(relations)/2):]\n\nknown_train = train_triplets[np.where(np.in1d(train_triplets[:,1], known_rels))]\ntarget_train = train_triplets[np.where(np.in1d(train_triplets[:,1], target_rels))]\nknown_valid = valid_triplets[np.where(np.in1d(valid_triplets[:,1], known_rels))]\ntarget_valid = valid_triplets[np.where(np.in1d(valid_triplets[:,1], target_rels))]\nknown_test = test_triplets[np.where(np.in1d(test_triplets[:,1], known_rels))]\ntarget_test = test_triplets[np.where(np.in1d(test_triplets[:,1], target_rels))]\n'''\n\n'''\nLoad general settings\n'''\n\nencoder_settings = settings['Encoder']\ndecoder_settings = settings['Decoder']\nshared_settings = settings['Shared']\ngeneral_settings = settings['General']\noptimizer_settings = settings['Optimizer']\nevaluation_settings = settings['Evaluation']\n\ngeneral_settings.put('EntityCount', len(entities))\ngeneral_settings.put('RelationCount', len(relations))\ngeneral_settings.put('EdgeCount', len(train_triplets))\n\nencoder_settings.merge(shared_settings)\nencoder_settings.merge(general_settings)\ndecoder_settings.merge(shared_settings)\ndecoder_settings.merge(general_settings)\n\noptimizer_settings.merge(general_settings)\nevaluation_settings.merge(general_settings)\n\nprint(\"# Setup settings\")\n\n\n'''\nConstruct the encoder-decoder pair:\n'''\nencoder = model_builder.build_encoder(encoder_settings, train_triplets)\nmodel = model_builder.build_decoder(encoder, decoder_settings)\nprint(\"# Setup model\")\n\n'''\nConstruct the optimizer with validation MRR as early stopping metric:\n'''\n\nopp = optimizer_parameter_parser.Parser(optimizer_settings)\nopp.set_save_function(model.save)\n\nscorer = 
evaluation.Scorer(evaluation_settings)\nscorer.register_data(train_triplets)\nscorer.register_data(valid_triplets)\nscorer.register_data(test_triplets)\nscorer.register_degrees(train_triplets)\nscorer.register_model(model)\nscorer.finalize_frequency_computation(np.concatenate((train_triplets, valid_triplets, test_triplets), axis=0))\n\ndef score_validation_data(validation_data):\n score_summary = scorer.compute_scores(validation_data, verbose=False).get_summary()\n #score_summary.dump_degrees('dumps/degrees.in', 'dumps/degrees.out')\n #score_summary.dump_frequencies('dumps/near.freq', 'dumps/target.freq')\n #score_summary.pretty_print()\n\n if evaluation_settings['Metric'] == 'MRR':\n lookup_string = score_summary.mrr_string()\n elif evaluation_settings['Metric'] == 'Accuracy':\n lookup_string = score_summary.accuracy_string()\n\n early_stopping = score_summary.results['Filtered'][lookup_string]\n\n score_summary = scorer.compute_scores(test_triplets, verbose=False).get_summary()\n score_summary.pretty_print()\n\n return early_stopping\n\n\nopp.set_early_stopping_score_function(score_validation_data)\n\nprint(len(train_triplets))\n\nadj_list = [[] for _ in entities]\nfor i,triplet in enumerate(train_triplets):\n adj_list[triplet[0]].append([i, triplet[2]])\n adj_list[triplet[2]].append([i, triplet[0]])\n\ndegrees = np.array([len(a) for a in adj_list])\nadj_list = [np.array(a) for a in adj_list]\n\n\ndef sample_TIES(triplets, n_target_vertices):\n vertex_set = set([])\n\n edge_indices = np.arange(triplets.shape[0])\n while len(vertex_set) < n_target_vertices:\n edge = triplets[np.random.choice(edge_indices)]\n new_vertices = [edge[0], edge[1]]\n vertex_set = vertex_set.union(new_vertices)\n\n sampled = [False]*triplets.shape[0]\n\n for i in edge_indices:\n edge = triplets[i]\n if edge[0] in vertex_set and edge[2] in vertex_set:\n sampled[i] = True\n\n return edge_indices[sampled]\n\n\ndef sample_edge_neighborhood(triplets, sample_size):\n\n edges = np.zeros((sample_size), dtype=np.int32)\n\n #initialize\n sample_counts = np.array([d for d in degrees])\n picked = np.array([False for _ in triplets])\n seen = np.array([False for _ in degrees])\n\n for i in range(0, sample_size):\n weights = sample_counts * seen\n\n if np.sum(weights) == 0:\n weights = np.ones_like(weights)\n weights[np.where(sample_counts == 0)] = 0\n\n probabilities = (weights) / np.sum(weights)\n chosen_vertex = np.random.choice(np.arange(degrees.shape[0]), p=probabilities)\n chosen_adj_list = adj_list[chosen_vertex]\n seen[chosen_vertex] = True\n\n chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))\n chosen_edge = chosen_adj_list[chosen_edge]\n edge_number = chosen_edge[0]\n\n while picked[edge_number]:\n chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))\n chosen_edge = chosen_adj_list[chosen_edge]\n edge_number = chosen_edge[0]\n\n edges[i] = edge_number\n other_vertex = chosen_edge[1]\n picked[edge_number] = True\n sample_counts[chosen_vertex] -= 1\n sample_counts[other_vertex] -= 1\n seen[other_vertex] = True\n\n return edges\n\n\nif 'NegativeSampleRate' in general_settings:\n ns = auxilliaries.NegativeSampler(int(general_settings['NegativeSampleRate']), general_settings['EntityCount'])\n ns.set_known_positives(train_triplets)\n\n def t_func(x): #horrible hack!!!\n arr = np.array(x)\n if not encoder.needs_graph():\n return ns.transform(arr)\n else:\n if 'GraphBatchSize' in general_settings:\n graph_batch_size = int(general_settings['GraphBatchSize'])\n\n '''\n n = np.zeros(100)\n for i 
in range(100):\n if i % 20 == 0:\n print(i)\n n[i] = sample_TIES(arr, 1000).shape[0]\n\n print(n.mean())\n print(n.std())\n exit()\n '''\n\n\n #graph_batch_ids = sample_TIES(arr, 1000) #sample_edge_neighborhood(arr, graph_batch_size)\n graph_batch_ids = sample_edge_neighborhood(arr, graph_batch_size)\n else:\n graph_batch_size = arr.shape[0]\n graph_batch_ids = np.arange(graph_batch_size)\n\n graph_batch = np.array(train_triplets)[graph_batch_ids]\n\n # Apply dropouts:\n graph_percentage = float(general_settings['GraphSplitSize'])\n split_size = int(graph_percentage * graph_batch.shape[0])\n graph_split_ids = np.random.choice(graph_batch_ids, size=split_size, replace=False)\n graph_split = np.array(train_triplets)[graph_split_ids]\n\n t = ns.transform(graph_batch)\n\n if 'StoreEdgeData' in encoder_settings and encoder_settings['StoreEdgeData'] == \"Yes\":\n return (graph_split, graph_split_ids, t[0], t[1])\n else:\n return (graph_split, t[0], t[1])\n\n opp.set_sample_transform_function(t_func)\n\n\n'''\nInitialize for training:\n'''\nprint(\"# Initialize for training\")\n# Hack for validation evaluation:\nmodel.preprocess(train_triplets)\nmodel.register_for_test(train_triplets)\n\nmodel.initialize_train()\n\noptimizer_weights = model.get_weights()\noptimizer_input = model.get_train_input_variables()\nloss = model.get_loss(mode='train') + model.get_regularization()\nprint(\"# optimizer_input\", optimizer_input)\n\n'''\nClean this up:\n'''\n\nfor add_op in model.get_additional_ops():\n opp.additional_ops.append(add_op)\n\noptimizer_parameters = opp.get_parametrization()\n\n'''\nTrain with Converge:\n'''\n\nprint(\"# Start training\")\nmodel.session = tf.Session()\noptimizer = build_tensorflow(loss, optimizer_weights, optimizer_parameters, optimizer_input)\noptimizer.set_session(model.session)\n\nprint(\"# Fitting training data\")\noptimizer.fit(train_triplets, validation_data=valid_triplets)\nprint(\"# Done\")\n#scorer.dump_all_scores(valid_triplets, 'dumps/subjects.valid', 'dumps/objects.valid')\n#scorer.dump_all_scores(test_triplets, 'dumps/subjects.test', 'dumps/objects.test')\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.ones_like", "numpy.random.choice", "numpy.zeros", "numpy.sum", "tensorflow.Session", "numpy.where", "numpy.arange" ] ]
vveitch/causal-network-embeddings
[ "0afe579c845370f04b0ede53831ab2acf9173764" ]
[ "src/semi_parametric_estimation/helpers.py" ]
[ "import numpy as np\nfrom scipy.special import logit\n\nimport sklearn.linear_model as lm\n\n\ndef calibrate_g(g, t):\n \"\"\"\n Improve calibation of propensity scores by fitting 1 parameter (temperature) logistic regression on heldout data\n\n :param g: raw propensity score estimates\n :param t: treatment assignments\n :return:\n \"\"\"\n\n logit_g = logit(g).reshape(-1,1)\n calibrator = lm.LogisticRegression(fit_intercept=False, C=1e6, solver='lbfgs') # no intercept or regularization\n calibrator.fit(logit_g, t)\n calibrated_g = calibrator.predict_proba(logit_g)[:,1]\n return calibrated_g\n\n\ndef truncate_by_g(attribute, g, level=0.1):\n keep_these = np.logical_and(g >= level, g <= 1.-level)\n\n return attribute[keep_these]\n\n\ndef truncate_all_by_g(q_t0, q_t1, g, t, y, truncate_level=0.05):\n \"\"\"\n Helper function to clean up nuisance parameter estimates.\n\n \"\"\"\n\n orig_g = np.copy(g)\n\n q_t0 = truncate_by_g(np.copy(q_t0), orig_g, truncate_level)\n q_t1 = truncate_by_g(np.copy(q_t1), orig_g, truncate_level)\n g = truncate_by_g(np.copy(g), orig_g, truncate_level)\n t = truncate_by_g(np.copy(t), orig_g, truncate_level)\n y = truncate_by_g(np.copy(y), orig_g, truncate_level)\n\n return q_t0, q_t1, g, t, y\n\n\ndef cross_entropy(y, p):\n return -np.mean((y*np.log(p) + (1.-y)*np.log(1.-p)))\n\n\ndef mse(x, y):\n return np.mean(np.square(x-y))\n" ]
[ [ "numpy.square", "numpy.log", "numpy.copy", "numpy.logical_and", "sklearn.linear_model.LogisticRegression", "scipy.special.logit" ] ]
ifding/self-driving-car
[ "fc8bc808d5439686f0ee24a4f0f3b1f5354df6c0" ]
[ "faster-rcnn-tutorial/detection/transformations.py" ]
[ "from functools import partial\nfrom typing import List, Callable\n\nimport albumentations as A\nimport numpy as np\nimport torch\nfrom sklearn.externals._pilutil import bytescale\nfrom torchvision.ops import nms\n\n\ndef normalize_01(inp: np.ndarray):\n \"\"\"Squash image input to the value range [0, 1] (no clipping)\"\"\"\n inp_out = (inp - np.min(inp)) / np.ptp(inp)\n return inp_out\n\n\ndef normalize(inp: np.ndarray, mean: float, std: float):\n \"\"\"Normalize based on mean and standard deviation.\"\"\"\n inp_out = (inp - mean) / std\n return inp_out\n\n\ndef re_normalize(inp: np.ndarray,\n low: int = 0,\n high: int = 255\n ):\n \"\"\"Normalize the data to a certain range. Default: [0-255]\"\"\"\n inp_out = bytescale(inp, low=low, high=high)\n return inp_out\n\n\ndef clip_bbs(inp: np.ndarray,\n bbs: np.ndarray):\n \"\"\"\n If the bounding boxes exceed one dimension, they are clipped to the dim's maximum.\n Bounding boxes are expected to be in xyxy format.\n Example: x_value=224 but x_shape=200 -> x1=199\n \"\"\"\n\n def clip(value: int, max: int):\n\n if value >= max - 1:\n value = max - 1\n elif value <= 0:\n value = 0\n\n return value\n\n output = []\n for bb in bbs:\n x1, y1, x2, y2 = tuple(bb)\n x_shape = inp.shape[1]\n y_shape = inp.shape[0]\n\n x1 = clip(x1, x_shape)\n y1 = clip(y1, y_shape)\n x2 = clip(x2, x_shape)\n y2 = clip(y2, y_shape)\n\n output.append([x1, y1, x2, y2])\n\n return np.array(output)\n\n\ndef map_class_to_int(labels: List[str], mapping: dict):\n \"\"\"Maps a string to an integer.\"\"\"\n labels = np.array(labels)\n dummy = np.empty_like(labels)\n for key, value in mapping.items():\n dummy[labels == key] = value\n\n return dummy.astype(np.uint8)\n\n\ndef apply_nms(target: dict, iou_threshold):\n \"\"\"Non-maximum Suppression\"\"\"\n boxes = torch.tensor(target['boxes'])\n labels = torch.tensor(target['labels'])\n scores = torch.tensor(target['scores'])\n\n if boxes.size()[0] > 0:\n mask = nms(boxes, scores, iou_threshold=iou_threshold)\n mask = (np.array(mask),)\n\n target['boxes'] = np.asarray(boxes)[mask]\n target['labels'] = np.asarray(labels)[mask]\n target['scores'] = np.asarray(scores)[mask]\n\n return target\n\n\ndef apply_score_threshold(target: dict, score_threshold):\n \"\"\"Removes bounding box predictions with low scores.\"\"\"\n boxes = target['boxes']\n labels = target['labels']\n scores = target['scores']\n\n mask = np.where(scores > score_threshold)\n target['boxes'] = boxes[mask]\n target['labels'] = labels[mask]\n target['scores'] = scores[mask]\n\n return target\n\n\nclass Repr:\n \"\"\"Evaluatable string representation of an object\"\"\"\n\n def __repr__(self): return f'{self.__class__.__name__}: {self.__dict__}'\n\n\nclass FunctionWrapperSingle(Repr):\n \"\"\"A function wrapper that returns a partial for input only.\"\"\"\n\n def __init__(self, function: Callable, *args, **kwargs):\n self.function = partial(function, *args, **kwargs)\n\n def __call__(self, inp: np.ndarray): return self.function(inp)\n\n\nclass FunctionWrapperDouble(Repr):\n \"\"\"A function wrapper that returns a partial for an input-target pair.\"\"\"\n\n def __init__(self, function: Callable, input: bool = True, target: bool = False, *args, **kwargs):\n self.function = partial(function, *args, **kwargs)\n self.input = input\n self.target = target\n\n def __call__(self, inp: np.ndarray, tar: dict):\n if self.input: inp = self.function(inp)\n if self.target: tar = self.function(tar)\n return inp, tar\n\n\nclass Compose:\n \"\"\"Baseclass - composes several transforms 
together.\"\"\"\n\n def __init__(self, transforms: List[Callable]):\n self.transforms = transforms\n\n def __repr__(self): return str([transform for transform in self.transforms])\n\n\nclass ComposeDouble(Compose):\n \"\"\"Composes transforms for input-target pairs.\"\"\"\n\n def __call__(self, inp: np.ndarray, target: dict):\n for t in self.transforms:\n inp, target = t(inp, target)\n return inp, target\n\n\nclass ComposeSingle(Compose):\n \"\"\"Composes transforms for input only.\"\"\"\n\n def __call__(self, inp: np.ndarray):\n for t in self.transforms:\n inp = t(inp)\n return inp\n\n\nclass AlbumentationWrapper(Repr):\n \"\"\"\n A wrapper for the albumentation package.\n Bounding boxes are expected to be in xyxy format (pascal_voc).\n Bounding boxes cannot be larger than the spatial image's dimensions.\n Use Clip() if your bounding boxes are outside of the image, before using this wrapper.\n \"\"\"\n\n def __init__(self, albumentation: Callable, format: str = 'pascal_voc'):\n self.albumentation = albumentation\n self.format = format\n\n def __call__(self, inp: np.ndarray, tar: dict):\n # input, target\n transform = A.Compose([\n self.albumentation\n ], bbox_params=A.BboxParams(format=self.format, label_fields=['class_labels']))\n\n out_dict = transform(image=inp, bboxes=tar['boxes'], class_labels=tar['labels'])\n\n input_out = np.array(out_dict['image'])\n boxes = np.array(out_dict['bboxes'])\n labels = np.array(out_dict['class_labels'])\n\n tar['boxes'] = boxes\n tar['labels'] = labels\n\n return input_out, tar\n\n\nclass Clip(Repr):\n \"\"\"\n If the bounding boxes exceed one dimension, they are clipped to the dim's maximum.\n Bounding boxes are expected to be in xyxy format.\n Example: x_value=224 but x_shape=200 -> x1=199\n \"\"\"\n\n def __call__(self, inp: np.ndarray, tar: dict):\n new_boxes = clip_bbs(inp=inp, bbs=tar['boxes'])\n tar['boxes'] = new_boxes\n\n return inp, tar\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.min", "numpy.where", "torch.tensor", "sklearn.externals._pilutil.bytescale", "numpy.ptp", "numpy.empty_like" ] ]
sebastiantiesmeyer/deeplabchop3d
[ "ee2dc65ef558a8c7ee052b3078c717ab8422fd5e" ]
[ "train_chop.py" ]
[ "import torch\nfrom torch.autograd import Variable\n#import time\nimport os\n#import sys\nimport h5py\nfrom utils import Logger\nimport pickle\nimport generator\n\nfrom train import train_epoch\nfrom validation import val_epoch\nfrom utils import AverageMeter, calculate_accuracy\nfrom generator import Generator\nimport numpy as np\nfrom torch import nn\nfrom torch import optim\nfrom torch.optim import lr_scheduler\nfrom modules import create_model\n\ndata_path =('/home/sebastian/code/3D-ResNets-PyTorch/deeplabcut3d/projects/test1-sebastian-2018-09-30/data/')\ntrain_test_split=0.9\nn_epochs=100\nbatch_size = 1\nlearning_rate = 0.01\nmomentum = 0.9\nweight_decay = 0.0001\nnesterov = True\n\nfile = h5py.File(os.path.join(data_path,'ExtractedFrames.h5'),'r')\nn_frames = file['X'].shape[0]\nshuffle = np.random.permutation(np.arange(n_frames))\ntrain_test_split = int(shuffle.shape[0]*train_test_split) \n\n#train_loader = Generator(file,batch_size=batch_size, idcs=shuffle[0:train_test_split])#,n_frames = 5)\n#val_loader = Generator(file,batch_size=batch_size, idcs=shuffle[train_test_split:])#,n_frames = 5)\n\ntrain_logger = Logger(\nos.path.join(data_path, 'train.log'),\n['epoch', 'loss', 'acc', 'lr'])\n\ntrain_batch_logger = Logger(\nos.path.join(data_path, 'train_batch.log'),\n['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])\n\nval_logger = Logger(\nos.path.join(data_path, 'test.log'),\n['epoch', 'loss', 'acc', 'lr'])\n\n\n#res2d_pruned = nn.Sequential(*list(resnet18.children())[2:-4])\n#res3d_pruned = nn.Sequential(*(list(model.children())[:5]))#+(list(inter1.children()))))#+list(resnet18.children())[2:-4]))\n#inter1 = Inter_Layers()\n\n\n#print(resnet_hybrid(torch.rand([1,3,5,100,100])).shape)\n#torch.save(resnet_hybrid,'/home/sebastian/code/deeplabchop-3d/deeplabcut3d/deeplabchop/resnet_hybrid.pth',pickle_protocol=pickle.HIGHEST_PROTOCOL)\n\ndata_path='/home/sebastian/Desktop/'\nbatch_size=3\npath_val = None\nt_size = 10\n\npath_X = os.path.join(data_path,'ExtractedFrames.h5')\npath_Y = os.path.join(data_path,'labels.pkl')\n\ndata = h5py.File(path_X,'r')\n \nX = data['X']\n\nwith open(path_Y, 'rb') as handler:\n Y = pickle.load(handler)\n\nn_categories = len(Y[0].keys())\n\nresnet_hybrid = create_model(5)\n\ncriterion = nn.MSELoss()\n# criterion = criterion.cuda()\noptimizer = optim.SGD(\n resnet_hybrid.parameters(),\n lr=learning_rate,\n momentum=momentum,\n weight_decay=weight_decay,\n nesterov=nesterov)\nscheduler = lr_scheduler.ReduceLROnPlateau(\n optimizer, 'min', patience=20)\n\nopt = type('', (), {})() #create empty object\nopt.arch = 'resnet-101'\nopt.result_path = data_path\nopt.no_cuda=True\nopt.checkpoint=1\ntrain_generator = generator.Generator(X, Y, batch_size, n_frames = 5, size_x = 400, size_y = 400)\nresnet_hybrid.cuda()\n\nprint('run')\nfor i in range(0,100):# n_epochs + 1):\n x,y = train_generator.__getitem__()\n \n y_pred = resnet_hybrid(x)\n\n # Compute and print loss\n loss = criterion(y_pred, y)\n print(i, loss.item())\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\n" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.arange", "torch.nn.MSELoss" ] ]
Jay-9912/AlignShift
[ "db0032ae7f10ec4acfe7593b9a985a8f8fd3c44e" ]
[ "deeplesion/dataset/DeepLesionDataset_align.py" ]
[ "import numpy as np\nimport os\nimport csv\nimport cv2\nimport logging \nfrom pycocotools import mask as mutils\nfrom mmcv import Config\nimport torch\nimport os\nfrom mmdet.datasets.registry import DATASETS\nimport pickle\nfrom mmdet.datasets.pipelines import Compose\nfrom mmdet.datasets.custom import CustomDataset\n\[email protected]_module\nclass DeepLesionDatasetAlign(CustomDataset):\n\n CLASSES = ('lesion')\n def __init__(self, \n ann_file, \n pipeline,\n pre_pipeline,\n dicm2png_cfg,\n data_root=None, \n image_path='',\n seg_prefix=None,\n proposal_file=None,\n test_mode=False):\n self.data_path = data_root\n self.classes = ['__background__', 'lesion']\n self.num_classes = len(self.classes)\n self.load_annotations(ann_file)\n self.img_ids = [a['filename'] for a in self.ann]\n self.cat_ids = self.classes\n # self.image_fn_list, self.lesion_idx_grouped = self.load_split_index()\n # self.num_images = len(self.image_fn_list)\n self.cfg = Config(dicm2png_cfg)\n self.pipeline = Compose(pipeline)\n self.pre_pipeline = Compose(pre_pipeline)\n self.img_path = image_path\n self.seg_prefix = seg_prefix\n self.proposals = None\n if proposal_file is not None:\n self.proposals = None\n self.slice_num = self.cfg.NUM_SLICES\n self.is_train = not test_mode\n \n if self.is_train:\n self._set_group_flag()\n\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target, info).\n \"\"\"\n # print(index)\n ann = self.ann[index]\n image_fn = ann['filename']\n boxes = np.array(ann['ann']['bboxes'], dtype=np.float32)\n masks= np.array([mutils.decode(m) for m in ann['ann']['masks']], dtype=np.float32).transpose((1,2,0))\n # masks = masks.sum(0)>0\n slice_intv = ann['ann']['slice_intv']\n spacing = ann['ann']['spacing']\n label = ann['ann']['labels']\n # recists = ann['ann']['recists']\n # diameters = ann['ann']['diameters']\n # window = ann['ann']['window']\n gender = float(ann['ann']['gender'])\n age = float(ann['ann']['age'])\n z_coord = float(ann['ann']['z_coord'])\n\n im, im_scale = load_prep_img(self.img_path, image_fn, spacing, slice_intv,\n self.cfg, num_slice=self.slice_num, is_train=self.is_train)\n\n # im -= self.cfg.PIXEL_MEAN\n boxes = self.clip_to_image(boxes, im, False)\n\n masks = masks.transpose((2, 0, 1))\n boxes = boxes.astype(np.float32)\n infos = dict(gender=gender,\n age=age,\n z_coord=z_coord)\n results = dict(additional_fts=infos)#img_info=ann, ann_info=infos\n\n results['filename'] = image_fn\n # results['flage'] = flage\n results['img'] = im\n results['img_shape'] = im.shape\n results['ori_shape'] = im.shape#[ann['height'], ann['width']]\n if self.proposals is not None:\n results['proposals'] = self.proposals[index]\n\n results['bbox_fields'] = []\n results['mask_fields'] = []\n results['gt_bboxes'] = boxes\n results['bbox_fields'].append('gt_bboxes')\n results['gt_labels'] = label.astype(np.int64)\n results['gt_masks'] = masks\n results['mask_fields'].append('gt_masks')\n results['thickness'] = slice_intv\n results = self.pre_pipeline(results)\n # results['gt_masks'] = masks_scaled\n # results['mask_fields'].append('gt_masks')\n \n return self.pipeline(results)\n\n\n def __len__(self):\n return len(self.ann)\n # return 160 #for debug\n def clip_to_image(self, bbox, img, remove_empty=True):\n TO_REMOVE = 1\n bbox[:, 0] = bbox[:, 0].clip(min=0, max=img.shape[1] - TO_REMOVE)\n bbox[:, 1] = bbox[:, 1].clip(min=0, max=img.shape[0] - TO_REMOVE)\n bbox[:, 2] = bbox[:, 2].clip(min=0, max=img.shape[1] - TO_REMOVE)\n bbox[:, 3] = bbox[:, 
3].clip(min=0, max=img.shape[0] - TO_REMOVE)\n if remove_empty:\n box = bbox\n keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])\n return bbox[keep]\n return bbox\n\n def _set_group_flag(self):\n \"\"\"Set flag according to image aspect ratio.\n\n Images with aspect ratio greater than 1 will be set as group 1,\n otherwise group 0.\n \"\"\"\n self.flag = np.zeros(len(self), dtype=np.uint8)\n if not self.cfg.GROUNP_ZSAPACING: return\n for i in range(len(self)):\n img_info = self.ann[i]\n if img_info['ann']['slice_intv'] < 2.0:\n self.flag[i] = 1\n logging.info(f'slice_intv grounped by 2.0: {sum(self.flag)}/{len(self)-sum(self.flag)}')\n\n def load_annotations(self, ann_file):\n \"\"\"load annotations and meta-info from DL_info.csv\"\"\"\n with open(ann_file,'rb') as f:\n self.ann = pickle.load(f)\n \n\n\ndef load_prep_img(data_dir, imname, spacing, slice_intv, cfg, num_slice=3, is_train=False):\n \"\"\"load volume, windowing, interpolate multiple slices, clip black border, resize according to spacing\"\"\"\n im = load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv=cfg.SLICE_INTV)\n\n im = windowing(im, cfg.WINDOWING)\n im_shape = im.shape[0:2]\n im_scale = 1.0\n\n return im, im_scale\n\ndef load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv):\n data_cache = {}\n def _load_data_from_png(imname, delta=0):\n imname1 = get_slice_name(data_dir, imname, delta)\n if imname1 not in data_cache.keys():\n data_cache[imname1] = cv2.imread(os.path.join(data_dir, imname1), -1)\n assert data_cache[imname1] is not None, 'file reading error: ' + imname1\n # if data_cache[imname1] is None:\n # print('file reading error:', imname1)\n return data_cache[imname1]\n\n _load_data = _load_data_from_png\n im_cur = _load_data(imname)\n\n\n if norm_slice_intv == 0 or np.isnan(slice_intv) or slice_intv < 0:\n ims = [im_cur] * num_slice # only use the central slice\n\n else:\n ims = [im_cur]\n # find neighboring slices of im_cure\n rel_pos = float(norm_slice_intv) / slice_intv\n a = rel_pos - np.floor(rel_pos)\n b = np.ceil(rel_pos) - rel_pos\n if rel_pos <= 1.0:#slice_intv bigger than norm_slice_intv, let alien_conv to interplot \n for p in range(int((num_slice-1)/2)):\n im_prev = _load_data(imname, - (p + 1))\n im_next = _load_data(imname, (p + 1))\n ims = [im_prev] + ims + [im_next]\n elif a == 0: # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation\n for p in range(int((num_slice-1)/2)):\n im_prev = _load_data(imname, - rel_pos * (p + 1))\n im_next = _load_data(imname, rel_pos * (p + 1))\n ims = [im_prev] + ims + [im_next]\n else:\n for p in range(int((num_slice-1)/2)):\n intv1 = rel_pos*(p+1)\n slice1 = _load_data(imname, - np.ceil(intv1))\n slice2 = _load_data(imname, - np.floor(intv1))\n im_prev = a * slice1 + b * slice2 # linear interpolation\n\n slice1 = _load_data(imname, np.ceil(intv1))\n slice2 = _load_data(imname, np.floor(intv1))\n im_next = a * slice1 + b * slice2\n\n ims = [im_prev] + ims + [im_next]\n\n\n ims = [im.astype(float) for im in ims]\n im = cv2.merge(ims)\n im = im.astype(np.float32,\n copy=False) - 32768 # there is an offset in the 16-bit png files, intensity - 32768 = Hounsfield unit\n\n\n return im\n\n\ndef get_slice_name(data_dir, imname, delta=0):\n \"\"\"Infer slice name with an offset\"\"\"\n if delta == 0:\n return imname\n delta = int(delta)\n dirname, slicename = imname.split(os.sep)\n slice_idx = int(slicename[:-4])\n imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx 
+ delta)\n\n # if the slice is not in the dataset, use its neighboring slice\n while not os.path.exists(os.path.join(data_dir, imname1)):\n # print('file not found:', imname1)\n delta -= np.sign(delta)\n imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta)\n if delta == 0:\n break\n\n return imname1\n\n\ndef windowing(im, win):\n \"\"\"scale intensity from win[0]~win[1] to float numbers in 0~255\"\"\"\n im1 = im.astype(float)\n im1 -= win[0]\n im1 /= win[1] - win[0]\n im1[im1 > 1] = 1\n im1[im1 < 0] = 0\n im1 *= 255\n im1 -= 50\n return im1\n\n\ndef windowing_rev(im, win):\n \"\"\"backward windowing\"\"\"\n im1 = im.astype(float)#/255\n im1 *= win[1] - win[0]\n im1 += win[0]\n return im1\n\n\n\ndef get_range(mask, margin=0):\n \"\"\"Get up, down, left, right extreme coordinates of a binary mask\"\"\"\n idx = np.nonzero(mask)\n u = max(0, idx[0].min() - margin)\n d = min(mask.shape[0] - 1, idx[0].max() + margin)\n l = max(0, idx[1].min() - margin)\n r = min(mask.shape[1] - 1, idx[1].max() + margin)\n return [u, d, l, r]\n\n\ndef map_box_back(boxes, cx=0, cy=0, im_scale=1.):\n \"\"\"Reverse the scaling and offset of boxes\"\"\"\n boxes /= im_scale\n boxes[:, [0,2]] += cx\n boxes[:, [1,3]] += cy\n return boxes\n" ]
[ [ "numpy.array", "numpy.isnan", "numpy.ceil", "numpy.nonzero", "numpy.sign", "numpy.floor" ] ]
vishalbelsare/emmental
[ "040ff13752a8443485abe5f664d7e7df2f30f894" ]
[ "tests/schedulers/test_mixed_scheduler.py" ]
[ "\"\"\"Emmental mixed scheduler unit tests.\"\"\"\nimport logging\n\nimport numpy as np\nimport torch\n\nfrom emmental import EmmentalDataLoader, EmmentalDataset, init\nfrom emmental.schedulers.mixed_scheduler import MixedScheduler\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_mixed_scheduler(caplog):\n \"\"\"Unit test of mixed scheduler.\"\"\"\n caplog.set_level(logging.INFO)\n\n init()\n\n task1 = \"task1\"\n x1 = np.random.rand(20, 2)\n y1 = torch.from_numpy(np.random.rand(20))\n\n task2 = \"task2\"\n x2 = np.random.rand(30, 3)\n y2 = torch.from_numpy(np.random.rand(30))\n\n dataloaders = [\n EmmentalDataLoader(\n task_to_label_dict={task_name: \"label\"},\n dataset=EmmentalDataset(\n name=task_name, X_dict={\"feature\": x}, Y_dict={\"label\": y}\n ),\n split=\"train\",\n batch_size=10,\n shuffle=True,\n )\n for task_name, x, y in [(task1, x1, y1), (task2, x2, y2)]\n ]\n\n scheduler = MixedScheduler()\n\n assert scheduler.get_num_batches(dataloaders) == 2\n\n batch_data_names_1 = [\n batch[0].data_name for batch in scheduler.get_batches(dataloaders)\n ]\n batch_data_names_2 = [\n batch[1].data_name for batch in scheduler.get_batches(dataloaders)\n ]\n\n assert batch_data_names_1 == [task1, task1]\n assert batch_data_names_2 == [task2, task2]\n\n scheduler = MixedScheduler(fillup=True)\n\n assert scheduler.get_num_batches(dataloaders) == 3\n\n batch_data_names_1 = [\n batch[0].data_name for batch in scheduler.get_batches(dataloaders)\n ]\n batch_data_names_2 = [\n batch[1].data_name for batch in scheduler.get_batches(dataloaders)\n ]\n\n assert batch_data_names_1 == [task1, task1, task1]\n assert batch_data_names_2 == [task2, task2, task2]\n\n\ndef test_mixed_scheduler_no_y_dict(caplog):\n \"\"\"Unit test of mixed scheduler with no y_dict.\"\"\"\n caplog.set_level(logging.INFO)\n\n init()\n\n task1 = \"task1\"\n x1 = np.random.rand(20, 2)\n\n task2 = \"task2\"\n x2 = np.random.rand(30, 3)\n\n dataloaders = [\n EmmentalDataLoader(\n task_to_label_dict={task_name: None},\n dataset=EmmentalDataset(name=task_name, X_dict={\"feature\": x}),\n split=\"train\",\n batch_size=10,\n shuffle=True,\n )\n for task_name, x in [(task1, x1), (task2, x2)]\n ]\n\n scheduler = MixedScheduler()\n\n assert scheduler.get_num_batches(dataloaders) == 2\n\n batch_y_dict_1 = [batch[0].Y_dict for batch in scheduler.get_batches(dataloaders)]\n batch_y_dict_2 = [batch[1].Y_dict for batch in scheduler.get_batches(dataloaders)]\n\n assert batch_y_dict_1 == [None] * 2\n assert batch_y_dict_2 == [None] * 2\n" ]
[ [ "numpy.random.rand" ] ]
xzx482/captcha_identify.pytorch_fork
[ "8c2ff599c6afb196dddca3d4bc477ac78c95992e" ]
[ "train.py" ]
[ "# -*- coding: UTF-8 -*-\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport datasets\nfrom models import *\n# import torch_util\nimport os, shutil\nimport argparse\nimport test\nimport torchvision\nimport settings\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\n\n# Hyper Parameters\nnum_epochs = 300\nbatch_size = 20\nlearning_rate = 0.001\n\n# device = torch_util.select_device()\ndevice = torch.device(\"cpu\")\n\ndef main(args):\n cnn = CNN().to(device)\n \n cnn.train()\n criterion = nn.MultiLabelSoftMarginLoss()\n optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)\n\n if args.resume:\n cnn.load_state_dict(torch.load(args.model_path, map_location=device))\n\n max_acc = 0\n # Train the Model\n train_dataloader = datasets.get_train_data_loader()\n for epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_dataloader):\n images = Variable(images)\n labels = Variable(labels.float())\n predict_labels = cnn(images)\n loss = criterion(predict_labels, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i+1) % 2 == 0:\n print(\"epoch: %03g \\t step: %03g \\t loss: %.5f \\t\\r\" % (epoch, i+1, loss.item()))\n torch.save(cnn.state_dict(), \"./weights/cnn_%03g.pt\" % epoch)\n print(\"epoch: %03g \\t step: %03g \\t loss: %.5f \\t\" % (epoch, i, loss.item()))\n torch.save(cnn.state_dict(), \"./weights/cnn_%03g.pt\" % epoch)\n acc = test.test_data(\"./weights/cnn_%03g.pt\" % epoch)\n if max_acc < acc:\n print(\"update accuracy %.5f.\" % acc)\n max_acc = acc\n shutil.copy(\"./weights/cnn_%03g.pt\" % epoch, \"./weights/cnn_best.pt\")\n else:\n print(\"do not update %.5f.\" % acc)\n \n torch.save(cnn.state_dict(), \"./weights/cnn_last.pt\")\n print(\"save last model\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"load path\")\n parser.add_argument('--model-path', type=str, default=\"./weights/cnn_0.pt\")\n parser.add_argument('--resume',action='store_true')\n \n args = parser.parse_args()\n main(args)\n\n\n" ]
[ [ "torch.autograd.Variable", "torch.device", "torch.nn.MultiLabelSoftMarginLoss", "torch.load" ] ]
sharanyavenkat25/SearchEngine
[ "5c903fdc6e762c228f574a3dc2272f73f8b101d8" ]
[ "Src/pre-processing/create_corpus.py" ]
[ "import os\r\nimport pandas as pd\r\nimport re\r\nimport string\r\n\r\n\r\nimport nltk\r\nnltk.download('stopwords')\r\nnltk.download('punkt')\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\n\r\ndirectory = '/mnt/d/SearchEngine/TelevisionNews/'\r\nop_dir='/mnt/d/SearchEngine/Corpus/'\r\n\r\n\r\ndef create_corpus():\r\n\tfor filename in os.listdir(directory):\r\n\t\tif filename.endswith(\".csv\"):\r\n\t\t\tprint(filename, type(filename))\r\n\t\t\tdf = pd.read_csv(directory+filename)\r\n\t\t\tdf_text= df['Snippet']\r\n\t\t\tdf_text.columns=['Snippet']\r\n\r\n\t\t\t# clean(df_text)\r\n\r\n\t\t\toutdir = '/mnt/d/SearchEngine/Corpus/'\r\n\t\t\tif not os.path.exists(outdir):\r\n\t\t\t\tos.mkdir(outdir)\r\n\t\t\tfullname = os.path.join(op_dir,filename)\r\n\t\t\tdf_text.to_csv(fullname)\r\n\r\ncreate_corpus()" ]
[ [ "pandas.read_csv" ] ]
irisliucy/garage
[ "2f4dfe1c7472534df4187fde33d81af13e25c769" ]
[ "tests/garage/experiment/test_meta_evaluator.py" ]
[ "import csv\nimport tempfile\n\nimport cloudpickle\nfrom dowel import CsvOutput, logger, tabular\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nfrom garage.envs import GarageEnv, PointEnv\nfrom garage.experiment import LocalTFRunner, MetaEvaluator, SnapshotConfig\nfrom garage.experiment.deterministic import set_seed\nfrom garage.experiment.local_runner import LocalRunner\nfrom garage.experiment.task_sampler import SetTaskSampler\nfrom garage.np.algos import MetaRLAlgorithm\nfrom garage.sampler import LocalSampler\nfrom garage.tf.policies import GaussianMLPPolicy\n\n\nclass RandomPolicy:\n\n def __init__(self, action_space):\n self._action_space = action_space\n\n def reset(self):\n pass\n\n def get_action(self, observation):\n del observation\n return self._action_space.sample(), {}\n\n\nclass SingleActionPolicy:\n\n def __init__(self, action):\n self._action = action\n\n def reset(self):\n pass\n\n def get_action(self, observation):\n del observation\n return self._action, {}\n\n\nclass OptimalActionInference(MetaRLAlgorithm):\n\n sampler_cls = LocalSampler\n\n def __init__(self, env, max_path_length):\n self.env = env\n self.policy = RandomPolicy(self.env.spec.action_space)\n self.max_path_length = max_path_length\n\n def train(self, runner):\n del runner\n\n def get_exploration_policy(self):\n return self.policy\n\n def adapt_policy(self, exploration_policy, exploration_trajectories):\n best_timestep = np.argmax(exploration_trajectories.rewards)\n best_action = exploration_trajectories.actions[best_timestep]\n return SingleActionPolicy(best_action)\n\n\[email protected]\ndef test_meta_evaluator():\n set_seed(100)\n tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))\n max_path_length = 200\n with tempfile.TemporaryDirectory() as log_dir_name:\n runner = LocalRunner(\n SnapshotConfig(snapshot_dir=log_dir_name,\n snapshot_mode='last',\n snapshot_gap=1))\n env = GarageEnv(PointEnv())\n algo = OptimalActionInference(env=env, max_path_length=max_path_length)\n runner.setup(algo, env)\n meta_eval = MetaEvaluator(test_task_sampler=tasks,\n max_path_length=max_path_length,\n n_test_tasks=10)\n log_file = tempfile.NamedTemporaryFile()\n csv_output = CsvOutput(log_file.name)\n logger.add_output(csv_output)\n meta_eval.evaluate(algo)\n logger.log(tabular)\n meta_eval.evaluate(algo)\n logger.log(tabular)\n logger.dump_output_type(CsvOutput)\n logger.remove_output_type(CsvOutput)\n with open(log_file.name, 'r') as file:\n rows = list(csv.DictReader(file))\n assert len(rows) == 2\n assert float(rows[0]['MetaTest/__unnamed_task__/CompletionRate']) < 1.0\n assert float(rows[0]['MetaTest/__unnamed_task__/Iteration']) == 0\n assert (float(rows[0]['MetaTest/__unnamed_task__/MaxReturn']) >= float(\n rows[0]['MetaTest/__unnamed_task__/AverageReturn']))\n assert (float(rows[0]['MetaTest/__unnamed_task__/AverageReturn']) >=\n float(rows[0]['MetaTest/__unnamed_task__/MinReturn']))\n assert float(rows[1]['MetaTest/__unnamed_task__/Iteration']) == 1\n\n\nclass MockAlgo:\n\n sampler_cls = LocalSampler\n\n def __init__(self, env, policy, max_path_length, n_exploration_traj,\n meta_eval):\n self.env = env\n self.policy = policy\n self.max_path_length = max_path_length\n self.n_exploration_traj = n_exploration_traj\n self.meta_eval = meta_eval\n\n def train(self, runner):\n for step in runner.step_epochs():\n if step % 5 == 0:\n self.meta_eval.evaluate(self)\n\n def get_exploration_policy(self):\n return self.policy\n\n def adapt_policy(self, exploration_policy, exploration_trajectories):\n del 
exploration_policy\n assert (len(\n exploration_trajectories.lengths) == self.n_exploration_traj)\n\n\nclass MockTFAlgo(MockAlgo):\n\n sampler_cls = LocalSampler\n\n def __init__(self, env, policy, max_path_length, n_exploration_traj,\n meta_eval):\n super().__init__(env, policy, max_path_length, n_exploration_traj,\n meta_eval)\n self._build()\n\n def _build(self):\n input_var = tf.compat.v1.placeholder(\n tf.float32,\n shape=(None, None, self.env.observation_space.flat_dim))\n self.policy.build(input_var)\n\n def __setstate__(self, state):\n \"\"\"Parameters to restore from snapshot.\n\n Args:\n state (dict): Parameters to restore from.\n\n \"\"\"\n self.__dict__ = state\n self._build()\n\n\ndef test_pickle_meta_evaluator():\n set_seed(100)\n tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))\n max_path_length = 200\n env = GarageEnv(PointEnv())\n n_traj = 3\n with tempfile.TemporaryDirectory() as log_dir_name:\n runner = LocalRunner(\n SnapshotConfig(snapshot_dir=log_dir_name,\n snapshot_mode='last',\n snapshot_gap=1))\n meta_eval = MetaEvaluator(test_task_sampler=tasks,\n max_path_length=max_path_length,\n n_test_tasks=10,\n n_exploration_traj=n_traj)\n policy = RandomPolicy(env.spec.action_space)\n algo = MockAlgo(env, policy, max_path_length, n_traj, meta_eval)\n runner.setup(algo, env)\n log_file = tempfile.NamedTemporaryFile()\n csv_output = CsvOutput(log_file.name)\n logger.add_output(csv_output)\n meta_eval.evaluate(algo)\n meta_eval_pickle = cloudpickle.dumps(meta_eval)\n meta_eval2 = cloudpickle.loads(meta_eval_pickle)\n meta_eval2.evaluate(algo)\n\n\ndef test_meta_evaluator_with_tf():\n set_seed(100)\n tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))\n max_path_length = 200\n env = GarageEnv(PointEnv())\n n_traj = 3\n with tempfile.TemporaryDirectory() as log_dir_name:\n ctxt = SnapshotConfig(snapshot_dir=log_dir_name,\n snapshot_mode='none',\n snapshot_gap=1)\n with LocalTFRunner(ctxt) as runner:\n meta_eval = MetaEvaluator(test_task_sampler=tasks,\n max_path_length=max_path_length,\n n_test_tasks=10,\n n_exploration_traj=n_traj)\n policy = GaussianMLPPolicy(env.spec)\n algo = MockTFAlgo(env, policy, max_path_length, n_traj, meta_eval)\n runner.setup(algo, env)\n log_file = tempfile.NamedTemporaryFile()\n csv_output = CsvOutput(log_file.name)\n logger.add_output(csv_output)\n meta_eval.evaluate(algo)\n algo_pickle = cloudpickle.dumps(algo)\n tf.compat.v1.reset_default_graph()\n with LocalTFRunner(ctxt) as runner:\n algo2 = cloudpickle.loads(algo_pickle)\n runner.setup(algo2, env)\n runner.train(10, 0)\n" ]
[ [ "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.reset_default_graph", "numpy.argmax" ] ]
dbirman/cs375
[ "7aeac1ed57eff74cbecb3e1091b01f00d34629a8" ]
[ "final_project/combine_pred/combinet_builder.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport sys\nimport copy\n\nsys.path.append('../normal_pred/')\nfrom normal_encoder_asymmetric_with_bypass import *\n\ndef getWhetherResBlock(i, cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want][i]\n return 'ResBlock' in tmp_dict\n\ndef getResBlockSettings(i, cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want][i]\n return tmp_dict['ResBlock']\n\ndef getWhetherBn(i, cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want][i]\n return 'bn' in tmp_dict\n\ndef getWhetherSoftmax(i, cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want][i]\n return 'softmax' in tmp_dict\n\ndef getWhetherKin(cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want]\n return 'kin_act' in tmp_dict\n\ndef getKinFrom(cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want]\n return tmp_dict['kin_act']\n\ndef getKinSplitFrom(cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want]\n return tmp_dict['split_para']\n\ndef getWhetherFdb(i, cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want][i]\n return 'fdb' in tmp_dict\n\ndef getFdbFrom(i, cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want][i]['fdb']\n return tmp_dict['from']\n\ndef getFdbType(i, cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want][i]['fdb']\n return tmp_dict['type']\n\ndef getDepConvWhetherBn(i, cfg, key_want = \"encode\"):\n val = False\n tmp_dict = cfg[key_want][i]\n if 'conv' in tmp_dict:\n val = 'bn' in tmp_dict['conv']\n return val \n\ndef getConvOutput(i, cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want][i][\"conv\"]\n return tmp_dict.get(\"output\",0)==1\n\ndef getWhetherInitFile(i, cfg, key_want = \"encode\", layer_type = \"conv\"):\n tmp_dict = cfg[key_want][i][layer_type]\n return \"init_file\" in tmp_dict\n\ndef getInitFileName(i, cfg, key_want = \"encode\", layer_type = \"conv\"):\n tmp_dict = cfg[key_want][i][layer_type]\n init_path = tmp_dict[\"init_file\"]\n if init_path[0]=='$':\n init_path = cfg[init_path[1:]]\n return init_path\n\ndef getInitFileArgs(i, cfg, key_want = \"encode\", layer_type = \"conv\"):\n tmp_dict = cfg[key_want][i][layer_type]\n init_args = tmp_dict[\"init_layer_keys\"]\n return init_args\n\ndef getVarName(cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want]\n return tmp_dict.get('var_name', key_want)\n\ndef getVarOffset(cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want]\n return tmp_dict.get('var_offset', 0)\n\ndef getFdbVarName(cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want]\n return tmp_dict.get('fdb_var_name', key_want)\n\ndef getFdbVarOffset(cfg, key_want = \"encode\"):\n tmp_dict = cfg[key_want]\n return tmp_dict.get('fdb_var_offset', 0)\n\ndef getEncodeConvBn(i, cfg, which_one = 'encode'):\n val = False\n\n if which_one in cfg and (i in cfg[which_one]):\n if 'conv' in cfg[which_one][i]:\n if 'bn' in cfg[which_one][i]['conv']:\n val = True\n\n return val\n\ndef getPoolPadding(i, cfg, which_one = 'encode'):\n val = 'SAME'\n\n if which_one in cfg and (i in cfg[which_one]):\n if 'pool' in cfg[which_one][i]:\n if 'padding' in cfg[which_one][i]['pool']:\n val = cfg[which_one][i]['pool']['padding']\n\n return val\n\ndef getConvPadding(i, cfg, which_one = 'encode'):\n val = None\n\n if which_one in cfg and (i in cfg[which_one]):\n if 'conv' in cfg[which_one][i]:\n if 'padding' in cfg[which_one][i]['conv']:\n val = cfg[which_one][i]['conv']['padding']\n\n return val\n\ndef getConvUpsample(i, cfg, which_one = 'encode'):\n 
val = None\n\n if which_one in cfg and (i in cfg[which_one]):\n if 'conv' in cfg[which_one][i]:\n if 'upsample' in cfg[which_one][i]['conv']:\n val = cfg[which_one][i]['conv']['upsample']\n\n return val\n\ndef normal_vgg16_forcombine(inputs, cfg_initial, train=True, seed = None, reuse_flag = None, reuse_batch = None, batch_name = '', **kwargs):\n \"\"\"The Model definition for normals\"\"\"\n\n cfg = cfg_initial\n if seed==None:\n fseed = getFilterSeed(cfg)\n else:\n fseed = seed\n\n m = NoramlNetfromConv(seed = fseed, **kwargs)\n\n encode_nodes = []\n encode_nodes.append(inputs)\n\n with tf.contrib.framework.arg_scope([m.conv], init='xavier',\n stddev=.01, bias=0, activation='relu'):\n encode_depth = getEncodeDepth(cfg)\n print('Encode depth: %d' % encode_depth)\n\n #with tf.variable_scope('encode_bn0%s' % batch_name, reuse=reuse_batch):\n # inputs = m.batchnorm_corr(train, inputs = inputs)\n\n for i in range(1, encode_depth + 1):\n with tf.variable_scope('encode%i' % i, reuse=reuse_flag):\n cfs = getEncodeConvFilterSize(i, cfg)\n nf = getEncodeConvNumFilters(i, cfg)\n cs = getEncodeConvStride(i, encode_depth, cfg)\n\n if i==1:\n new_encode_node = m.conv(nf, cfs, cs, padding='VALID', in_layer=inputs)\n else:\n new_encode_node = m.conv(nf, cfs, cs)\n\n print('Encode conv %d with size %d stride %d numfilters %d' % (i, cfs, cs, nf)) \n do_pool = getEncodeDoPool(i, cfg)\n if do_pool:\n pfs = getEncodePoolFilterSize(i, cfg)\n ps = getEncodePoolStride(i, cfg)\n pool_type = getEncodePoolType(i, cfg)\n\n if pool_type == 'max':\n pfunc = 'maxpool'\n elif pool_type == 'avg':\n pfunc = 'avgpool' \n\n new_encode_node = m.pool(pfs, ps, pfunc=pfunc)\n print('Encode %s pool %d with size %d stride %d' % (pfunc, i, pfs, ps))\n if getWhetherBn(i, cfg):\n with tf.variable_scope('encode_bn%i%s' % (i, batch_name), reuse=reuse_batch):\n new_encode_node = m.batchnorm_corr(train)\n\n encode_nodes.append(new_encode_node) \n\n decode_depth = getDecodeDepth(cfg)\n print('Decode depth: %d' % decode_depth)\n\n for i in range(1, decode_depth + 1):\n with tf.variable_scope('decode%i' % (encode_depth + i), reuse=reuse_flag):\n\n add_bypass = getDecodeBypass(i, encode_nodes, None, 0, cfg)\n\n if add_bypass != None:\n bypass_layer = encode_nodes[add_bypass]\n\n decode = m.add_bypass(bypass_layer)\n\n print('Decode bypass from %d at %d for shape' % (add_bypass, i), decode.get_shape().as_list())\n\n do_unpool = getDecodeDoUnPool(i, cfg)\n if do_unpool:\n unpool_scale = getDecodeUnPoolScale(i, cfg)\n new_encode_node = m.resize_images_scale(unpool_scale)\n\n print('Decode unpool %d with scale %d' % (i, unpool_scale))\n\n cfs = getEncodeConvFilterSize(i, cfg, which_one = 'decode')\n nf = getEncodeConvNumFilters(i, cfg, which_one = 'decode')\n cs = getEncodeConvStride(i, encode_depth, cfg, which_one = 'decode')\n\n new_encode_node = m.conv(nf, cfs, cs)\n\n print('Decode conv %d with size %d stride %d numfilters %d' % (i, cfs, cs, nf)) \n\n return m\n\n# Function for building subnetwork based on configurations\ndef build_partnet(\n inputs, \n cfg_initial, \n key_want='encode', \n train=True, \n seed=None, \n reuse_flag=None, \n reuse_batch=None, \n fdb_reuse_flag=None, \n batch_name='', \n all_out_dict={}, \n init_stddev=.01, \n ignorebname=0, \n weight_decay=None, \n init_type='xavier', \n cache_filter=0, \n dict_cache_filter={},\n fix_pretrain = 0,\n **kwargs):\n\n cfg = cfg_initial\n if seed==None:\n fseed = getFilterSeed(cfg)\n else:\n fseed = seed\n\n if ignorebname==1:\n batch_name = ''\n reuse_batch = reuse_flag\n\n 
#print(cfg[key_want])\n\n m = NoramlNetfromConv(seed = fseed, **kwargs)\n\n assert key_want in cfg, \"Wrong key %s for network\" % key_want\n\n valid_flag = True\n if inputs==None:\n assert 'input' in cfg[key_want], \"No inputs specified for network %s!\" % key_want\n input_node = cfg[key_want]['input']\n assert input_node in all_out_dict, \"Input nodes not built yet for network %s!\" % key_want\n inputs = all_out_dict[input_node]\n valid_flag = False\n\n if getWhetherKin(cfg_initial, key_want = key_want):\n\n # Action related for kinetics\n\n kin_act = getKinFrom(cfg, key_want = key_want)\n\n # Reshape: put the time dimension to channel directly, assume time dimension is second dimension\n if kin_act=='reshape':\n inputs = tf.transpose(inputs, perm = [0,2,3,4,1])\n curr_shape = inputs.get_shape().as_list()\n inputs = tf.reshape(inputs, [curr_shape[0], curr_shape[1], curr_shape[2], -1])\n\n # Split: split the time dimension, build shared networks for all splits\n if kin_act=='split':\n split_para = getKinSplitFrom(cfg, key_want = key_want)\n split_inputs = tf.split(inputs, num_or_size_splits = split_para, axis = 1)\n\n new_cfg = copy.deepcopy(cfg)\n new_cfg[key_want]['kin_act'] = 'reshape'\n add_out_dict = {}\n all_outs = []\n\n for split_indx, curr_split in enumerate(split_inputs):\n curr_all_out_dict = copy.copy(all_out_dict)\n curr_m, curr_all_out_dict = build_partnet(\n curr_split, new_cfg, key_want=key_want, train=train, \n seed=seed, reuse_flag=reuse_flag or (split_indx > 0), \n reuse_batch=reuse_batch or (split_indx > 0), \n fdb_reuse_flag=fdb_reuse_flag or (split_indx > 0), \n batch_name=batch_name, all_out_dict=curr_all_out_dict, \n init_stddev=init_stddev, ignorebname=ignorebname, \n weight_decay=weight_decay, cache_filter=cache_filter,\n dict_cache_filter=dict_cache_filter,\n **kwargs)\n all_outs.append(curr_m.output)\n for layer_name in curr_all_out_dict:\n if layer_name in all_out_dict:\n continue\n if not layer_name in add_out_dict:\n add_out_dict[layer_name] = []\n add_out_dict[layer_name].append(curr_all_out_dict[layer_name])\n\n for layer_name in add_out_dict:\n all_out_dict[layer_name] = tf.stack(add_out_dict[layer_name], axis = 1)\n\n curr_m.output = tf.stack(all_outs, axis = 1)\n\n return curr_m, all_out_dict\n\n # Set the input\n m.output = inputs\n\n # General network building\n with tf.contrib.framework.arg_scope([m.conv], init = init_type,\n stddev=init_stddev, bias=0, activation='relu'):\n encode_depth = getPartnetDepth(cfg, key_want = key_want)\n\n # Sometimes we want this network share parameters with different network\n # we can achieve that by setting var_name (variable name) and var_offset (layer offset for sharing)\n var_name = getVarName(cfg, key_want = key_want)\n var_offset = getVarOffset(cfg, key_want = key_want)\n\n # fdb connections may have different var_name and var_offset\n fdb_var_name = getFdbVarName(cfg, key_want = key_want)\n fdb_var_offset = getFdbVarOffset(cfg, key_want = key_want)\n\n # Build each layer, as cfg file starts from 1, we also start from 1\n for i in range(1, encode_depth + 1):\n layer_name = \"%s_%i\" % (key_want, i)\n\n with tf.variable_scope('%s%i' % (var_name, i + var_offset), reuse=reuse_flag):\n\n # Build resnet block\n if getWhetherResBlock(i, cfg, key_want = key_want):\n new_encode_node = m.resblock(\n conv_settings = getResBlockSettings(i, cfg, key_want = key_want),\n weight_decay = weight_decay, bias = 0, init = init_type, \n stddev = init_stddev, train = True, padding = 'SAME',\n )\n\n # add bypass\n add_bypass = 
getDecodeBypass_light(i, cfg, key_want = key_want)\n\n if add_bypass != None:\n for bypass_layer_name in add_bypass:\n if bypass_layer_name=='_coord':\n new_encode_node = m.add_coord()\n #print('Add Coord here!')\n continue\n \n assert bypass_layer_name in all_out_dict, \"Node %s not built yet for network %s!\" % (bypass_layer_name, key_want)\n bypass_layer = all_out_dict[bypass_layer_name]\n new_encode_node = m.add_bypass(bypass_layer)\n #print('Network %s bypass from %s at %s' % (key_want, bypass_layer_name, layer_name))\n\n\n # do convolution\n if getDoConv(i, cfg, which_one = key_want):\n cfs = getEncodeConvFilterSize(i, cfg, which_one = key_want)\n nf = getEncodeConvNumFilters(i, cfg, which_one = key_want)\n cs = getEncodeConvStride(i, encode_depth, cfg, which_one = key_want)\n cvBn = getEncodeConvBn(i, cfg, which_one = key_want)\n conv_padding = getConvPadding(i, cfg, which_one = key_want)\n\n trans_out_shape = None\n conv_upsample = getConvUpsample(i, cfg, which_one = key_want)\n if not conv_upsample is None:\n trans_out_shape = m.output.get_shape().as_list()\n trans_out_shape[1] = conv_upsample*trans_out_shape[1]\n trans_out_shape[2] = conv_upsample*trans_out_shape[2]\n trans_out_shape[3] = nf\n\n padding = 'SAME'\n activation = 'relu'\n bias = 0\n\n if valid_flag:\n padding = 'VALID'\n valid_flag = False\n else:\n if getConvOutput(i, cfg, key_want = key_want):\n activation = None\n bias = 0\n\n if conv_padding!=None:\n padding = conv_padding\n\n init = init_type\n init_file = None\n init_layer_keys = None\n\n trainable = None\n\n #if getWhetherInitFile(i, cfg, key_want = key_want) and (reuse_flag!=True):\n if getWhetherInitFile(i, cfg, key_want = key_want):\n init = 'from_file'\n init_file = getInitFileName(i, cfg, key_want = key_want)\n init_layer_keys = getInitFileArgs(i, cfg, key_want = key_want)\n\n # if cache_filter is 1, will load into a tensor, save it for later reuse\n if cache_filter==1:\n init = 'from_cached'\n filter_cache_str_prefix = '%s_%i' % (var_name, i + var_offset)\n weight_key = '%s/weight' % filter_cache_str_prefix\n bias_key = '%s/bias' % filter_cache_str_prefix\n if not weight_key in dict_cache_filter:\n params = np.load(init_file)\n dict_cache_filter[weight_key] = tf.constant(params[init_layer_keys['weight']], dtype = tf.float32)\n dict_cache_filter[bias_key] = tf.constant(params[init_layer_keys['bias']], dtype = tf.float32)\n \n init_layer_keys = {'weight': dict_cache_filter[weight_key], 'bias': dict_cache_filter[bias_key]}\n else:\n print('Layer conv %s init from file' % layer_name)\n\n if fix_pretrain==1:\n trainable = False\n\n if not getConvDepsep(i, cfg, which_one = key_want):\n new_encode_node = m.conv(nf, cfs, cs, activation=activation, bias=bias, padding=padding, \n weight_decay=weight_decay, init=init, init_file=init_file, whetherBn=cvBn,\n train=train, init_layer_keys=init_layer_keys, trans_out_shape=trans_out_shape,\n trainable=trainable,\n )\n else:\n with_bn = getDepConvWhetherBn(i, cfg, key_want = key_want)\n new_encode_node = m.depthsep_conv(nf, getConvDepmul(i, cfg, which_one = key_want), cfs, cs, \n dep_padding=padding, sep_padding=padding, activation = activation, bias = bias,\n with_bn = with_bn, bn_name = batch_name, reuse_batch = reuse_batch, train = train, \n weight_decay = weight_decay,\n )\n\n #print('Network %s conv %s with size %d stride %d numfilters %d' % (key_want, layer_name, cfs, cs, nf)) \n\n # do unpool\n do_unpool = getDecodeDoUnPool(i, cfg, key_want = key_want)\n if do_unpool:\n unpool_scale = getDecodeUnPoolScale(i, cfg, 
key_want = key_want)\n new_encode_node = m.resize_images_scale(unpool_scale)\n\n #print('Network %s unpool %s with scale %d' % (key_want, layer_name, unpool_scale))\n\n if getDoFc(i, cfg, which_one = key_want):\n\n init = 'trunc_norm'\n init_file = None\n init_layer_keys = None\n\n if getWhetherInitFile(i, cfg, key_want = key_want, layer_type = 'fc'):\n print('Layer fc %s init from file' % layer_name)\n init = 'from_file'\n init_file = getInitFileName(i, cfg, key_want = key_want, layer_type = 'fc')\n init_layer_keys = getInitFileArgs(i, cfg, key_want = key_want, layer_type = 'fc')\n\n if cache_filter==1:\n init = 'from_cached'\n filter_cache_str_prefix = '%s_%i' % (var_name, i + var_offset)\n weight_key = '%s/weight' % filter_cache_str_prefix\n bias_key = '%s/bias' % filter_cache_str_prefix\n if not weight_key in dict_cache_filter:\n params = np.load(init_file)\n dict_cache_filter[weight_key] = tf.constant(params[init_layer_keys['weight']], dtype = tf.float32)\n dict_cache_filter[bias_key] = tf.constant(params[init_layer_keys['bias']], dtype = tf.float32)\n \n init_layer_keys = {'weight': dict_cache_filter[weight_key], 'bias': dict_cache_filter[bias_key]}\n\n if getFcOutput(i, cfg, key_want = key_want):\n if init == 'trunc_norm':\n init = init_type\n new_encode_node = m.fc(getFcNumFilters(i, cfg, key_want = key_want), \n activation=None, dropout=None, bias=0, weight_decay = weight_decay,\n init = init, init_file = init_file, init_layer_keys = init_layer_keys)\n else:\n new_encode_node = m.fc(getFcNumFilters(i, cfg, key_want = key_want), \n dropout=getFcDropout(i, cfg, train, key_want = key_want), bias=.1, \n weight_decay = weight_decay,\n init = init, init_file = init_file, init_layer_keys = init_layer_keys)\n\n # do pool\n do_pool = getEncodeDoPool(i, cfg, key_want = key_want)\n if do_pool:\n pfs = getEncodePoolFilterSize(i, cfg, key_want = key_want)\n ps = getEncodePoolStride(i, cfg, key_want = key_want)\n pool_type = getEncodePoolType(i, cfg, key_want = key_want)\n pool_padding = getPoolPadding(i, cfg, which_one = key_want)\n\n if pool_type == 'max':\n pfunc = 'maxpool'\n elif pool_type == 'avg':\n pfunc = 'avgpool' \n\n new_encode_node = m.pool(pfs, ps, pfunc=pfunc, padding=pool_padding)\n #print('Network %s %s pool %s with size %d stride %d' % (key_want, pfunc, layer_name, pfs, ps))\n\n if getWhetherSoftmax(i, cfg, key_want = key_want):\n new_encode_node = m.softmax()\n\n if getWhetherBn(i, cfg, key_want = key_want):\n #with tf.variable_scope('%s_bn%i%s' % (key_want, i, batch_name), reuse=reuse_batch):\n with tf.variable_scope('%s_bn%i%s' % (var_name, i + var_offset, batch_name), reuse=reuse_batch):\n new_encode_node = m.batchnorm_corr(train)\n\n if getWhetherFdb(i, cfg, key_want = key_want):\n from_layer = getFdbFrom(i, cfg, key_want = key_want)\n assert from_layer in all_out_dict, \"Fdb nodes not built yet for network %s, layer %i!\" % (key_want, i)\n with tf.variable_scope('%s_fdb%i' % (fdb_var_name, i + fdb_var_offset), reuse=fdb_reuse_flag):\n new_encode_node = m.modulate(all_out_dict[from_layer], bias=0, init = 'trunc_norm', stddev=init_stddev,\n weight_decay = weight_decay)\n\n all_out_dict[layer_name] = new_encode_node\n\n return m, all_out_dict\n\ndef combine_normal_tfutils(inputs, center_im = False, **kwargs):\n image_scenenet = tf.cast(inputs['image_scenenet'], tf.float32)\n image_scenenet = tf.div(image_scenenet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_scenenet = tf.subtract(image_scenenet, tf.constant(0.5, dtype=tf.float32))\n\n m_scenenet = 
normal_vgg16_forcombine(image_scenenet, reuse_flag = None, reuse_batch = None, batch_name = '_scenenet', **kwargs)\n\n image_pbrnet = tf.cast(inputs['image_pbrnet'], tf.float32)\n image_pbrnet = tf.div(image_pbrnet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_pbrnet = tf.subtract(image_pbrnet, tf.constant(0.5, dtype=tf.float32))\n\n m_pbrnet = normal_vgg16_forcombine(image_pbrnet, reuse_flag = True, reuse_batch = None, batch_name = '_pbrnet', **kwargs)\n\n return [m_scenenet.output, m_pbrnet.output], m_pbrnet.params\n\ndef input_reshape_mult(inputs, categorymulti = 1):\n if categorymulti>1:\n if 'image_place' in inputs:\n old_shape = inputs['image_place'].get_shape().as_list()\n new_shape = [old_shape[0] * old_shape[1]] + old_shape[2:]\n inputs['image_place'] = tf.reshape(inputs['image_place'], new_shape)\n inputs['label_place'] = tf.reshape(inputs['label_place'], [-1])\n\n if 'image_imagenet' in inputs:\n old_shape = inputs['image_imagenet'].get_shape().as_list()\n new_shape = [old_shape[0] * old_shape[1]] + old_shape[2:]\n inputs['image_imagenet'] = tf.reshape(inputs['image_imagenet'], new_shape)\n inputs['label_imagenet'] = tf.reshape(inputs['label_imagenet'], [-1])\n\n return inputs\n\ndef combine_normal_tfutils_new_half(inputs, center_im = False, categorymulti = 1, cfg_dataset = {}, twonormals = 0, **kwargs):\n all_outputs = []\n encode_reuse = None\n decode_half_reuse = None\n decode_next_reuse = None\n normal_reuse = None\n depth_reuse = None\n ins_decode_reuse = None\n ret_params = None\n\n inputs = input_reshape_mult(inputs, categorymulti = categorymulti)\n\n if cfg_dataset.get('scenenet', 0)==1 and 'image_scenenet' in inputs:\n image_scenenet = tf.cast(inputs['image_scenenet'], tf.float32)\n image_scenenet = tf.div(image_scenenet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_scenenet = tf.subtract(image_scenenet, tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_scenenet = {}\n m_scenenet_encode, all_out_dict_scenenet = build_partnet(image_scenenet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n encode_reuse = True\n\n m_scenenet_decode, all_out_dict_scenenet = build_partnet(None, key_want = 'decode_half', reuse_flag = decode_half_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n decode_half_reuse = True\n\n m_scenenet_decode, all_out_dict_scenenet = build_partnet(None, key_want = 'decode_next', reuse_flag = decode_next_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n decode_next_reuse = True\n \n if cfg_dataset.get('scene_normal', 1)==1:\n\n if twonormals==0:\n m_scenenet_normal, all_out_dict_scenenet = build_partnet(None, key_want = 'normal', reuse_flag = normal_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n normal_reuse = True\n else:\n m_scenenet_normal, all_out_dict_scenenet = build_partnet(None, key_want = 'normal_s', reuse_flag = None, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n\n output_nodes.append(m_scenenet_normal.output)\n ret_params = m_scenenet_normal.params\n\n if cfg_dataset.get('scene_depth', 1)==1:\n m_scenenet_depth, all_out_dict_scenenet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n 
depth_reuse = True\n output_nodes.append(m_scenenet_depth.output)\n ret_params = m_scenenet_depth.params\n\n if cfg_dataset.get('scene_instance', 0)==1:\n m_scenenet_ins_decode, all_out_dict_scenenet = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n ins_decode_reuse = True\n m_scenenet_instance, all_out_dict_scenenet = build_partnet(None, key_want = 'scene_instance', reuse_flag = None, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n output_nodes.append(m_scenenet_instance.output)\n ret_params = m_scenenet_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('scannet', 0)==1 and 'image_scannet' in inputs:\n image_scannet = tf.cast(inputs['image_scannet'], tf.float32)\n image_scannet = tf.div(image_scannet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_scannet = tf.subtract(image_scannet, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_scannet = {}\n m_scannet_encode, all_out_dict_scannet = build_partnet(image_scannet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n encode_reuse = True\n\n m_scannet_decode, all_out_dict_scannet = build_partnet(None, key_want = 'decode_half', reuse_flag = decode_half_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n decode_half_reuse = True\n\n m_scannet_decode, all_out_dict_scannet = build_partnet(None, key_want = 'decode_next', reuse_flag = decode_next_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n decode_next_reuse = True\n \n m_scannet_depth, all_out_dict_scannet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n depth_reuse = True\n\n all_outputs.extend([m_scannet_depth.output])\n ret_params = m_scannet_depth.params\n\n if cfg_dataset.get('pbrnet', 0)==1 and 'image_pbrnet' in inputs:\n image_pbrnet = tf.cast(inputs['image_pbrnet'], tf.float32)\n image_pbrnet = tf.div(image_pbrnet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_pbrnet = tf.subtract(image_pbrnet, tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_pbrnet = {}\n m_pbrnet_encode, all_out_dict_pbrnet = build_partnet(image_pbrnet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n encode_reuse = True\n\n m_pbrnet_decode, all_out_dict_pbrnet = build_partnet(None, key_want = 'decode_half', reuse_flag = decode_half_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n decode_half_reuse = True\n\n m_pbrnet_decode, all_out_dict_pbrnet = build_partnet(None, key_want = 'decode_next', reuse_flag = decode_next_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n decode_next_reuse = True\n\n if cfg_dataset.get('pbr_normal', 1)==1:\n if twonormals==0:\n m_pbrnet_normal, all_out_dict_pbrnet = build_partnet(None, key_want = 'normal', reuse_flag = normal_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n normal_reuse = True\n else:\n m_pbrnet_normal, all_out_dict_pbrnet = build_partnet(None, key_want = 'normal_p', reuse_flag = None, 
reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n\n output_nodes.append(m_pbrnet_normal.output)\n ret_params = m_pbrnet_normal.params\n \n if cfg_dataset.get('pbr_depth', 1)==1:\n m_pbrnet_depth, all_out_dict_pbrnet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n depth_reuse = True\n output_nodes.append(m_pbrnet_depth.output)\n ret_params = m_pbrnet_depth.params\n\n if cfg_dataset.get('pbr_instance', 0)==1:\n m_pbrnet_ins_decode, all_out_dict_pbrnet = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n ins_decode_reuse = True\n m_pbrnet_instance, all_out_dict_pbrnet = build_partnet(None, key_want = 'pbr_instance', reuse_flag = None, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n output_nodes.append(m_pbrnet_instance.output)\n ret_params = m_pbrnet_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('imagenet', 0)==1 and 'image_imagenet' in inputs:\n image_imagenet = tf.cast(inputs['image_imagenet'], tf.float32)\n image_imagenet = tf.div(image_imagenet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_imagenet = tf.subtract(image_imagenet, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_imagenet = {}\n m_imagenet_encode, all_out_dict_imagenet = build_partnet(image_imagenet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n encode_reuse = True\n \n m_imagenet_category, all_out_dict_imagenet = build_partnet(None, key_want = 'category', reuse_flag = None, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n\n all_outputs.extend([m_imagenet_category.output])\n ret_params = m_imagenet_category.params\n\n if cfg_dataset.get('coco', 0)==1 and 'image_coco' in inputs:\n\n image_coco = tf.cast(inputs['image_coco'], tf.float32)\n image_coco = tf.div(image_coco, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_coco = tf.subtract(image_coco, tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_coco = {}\n m_coco_encode, all_out_dict_coco = build_partnet(image_coco, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n encode_reuse = True\n\n m_coco_decode, all_out_dict_coco = build_partnet(None, key_want = 'decode_half', reuse_flag = decode_half_reuse, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n decode_half_reuse = True\n\n m_coco_ins_decode, all_out_dict_coco = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n ins_decode_reuse = True\n m_coco_instance, all_out_dict_coco = build_partnet(None, key_want = 'coco_instance', reuse_flag = None, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n output_nodes.append(m_coco_instance.output)\n ret_params = m_coco_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('place', 0)==1 and 'image_place' in inputs:\n image_place = tf.cast(inputs['image_place'], tf.float32)\n image_place = tf.div(image_place, tf.constant(255, dtype=tf.float32))\n if center_im:\n 
image_place = tf.subtract(image_place, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_place = {}\n m_place_encode, all_out_dict_place = build_partnet(image_place, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n encode_reuse = True\n \n m_place_category, all_out_dict_place = build_partnet(None, key_want = 'place_category', reuse_flag = None, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n\n all_outputs.extend([m_place_category.output])\n ret_params = m_place_category.params\n\n if cfg_dataset.get('nyuv2', 0)==1 and 'image_nyuv2' in inputs:\n image_nyuv2 = tf.cast(inputs['image_nyuv2'], tf.float32)\n image_nyuv2 = tf.div(image_nyuv2, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_nyuv2 = tf.subtract(image_nyuv2, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_nyuv2 = {}\n m_nyuv2_encode, all_out_dict_nyuv2 = build_partnet(image_nyuv2, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n encode_reuse = True\n\n m_nyuv2_decode, all_out_dict_nyuv2 = build_partnet(None, key_want = 'decode_half', reuse_flag = decode_half_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n decode_half_reuse = True\n\n m_nyuv2_decode, all_out_dict_nyuv2 = build_partnet(None, key_want = 'decode_next', reuse_flag = decode_next_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n decode_next_reuse = True\n \n m_nyuv2_depth, all_out_dict_nyuv2 = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n depth_reuse = True\n\n all_outputs.extend([m_nyuv2_depth.output])\n #ret_params = m_nyuv2_depth.params\n\n return all_outputs, ret_params\n\ndef combine_normal_tfutils_new_f2(inputs, center_im = False, categorymulti = 1, cfg_dataset = {}, twonormals = 0, **kwargs):\n all_outputs = []\n encode_reuse = None\n decode_reuse = None\n normal_reuse = None\n depth_reuse = None\n category_reuse = None\n ins_decode_reuse = None\n decode_encode_reuse = None\n ret_params = None\n\n inputs = input_reshape_mult(inputs, categorymulti = categorymulti)\n\n if cfg_dataset.get('scenenet', 0)==1 and 'image_scenenet' in inputs:\n image_scenenet = tf.cast(inputs['image_scenenet'], tf.float32)\n image_scenenet = tf.div(image_scenenet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_scenenet = tf.subtract(image_scenenet, tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_scenenet = {}\n m_scenenet_encode, all_out_dict_scenenet = build_partnet(image_scenenet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n encode_reuse = True\n\n m_scenenet_decode, all_out_dict_scenenet = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n decode_reuse = True\n \n if cfg_dataset.get('scene_normal', 1)==1:\n\n if twonormals==0:\n m_scenenet_normal, all_out_dict_scenenet = build_partnet(None, key_want = 'normal', reuse_flag = normal_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n normal_reuse = True\n else:\n m_scenenet_normal, all_out_dict_scenenet = 
build_partnet(None, key_want = 'normal_s', reuse_flag = None, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n\n output_nodes.append(m_scenenet_normal.output)\n ret_params = m_scenenet_normal.params\n\n if cfg_dataset.get('scene_depth', 1)==1:\n m_scenenet_depth, all_out_dict_scenenet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n depth_reuse = True\n output_nodes.append(m_scenenet_depth.output)\n ret_params = m_scenenet_depth.params\n\n if cfg_dataset.get('scene_instance', 0)==1:\n m_scenenet_ins_decode, all_out_dict_scenenet = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n ins_decode_reuse = True\n m_scenenet_instance, all_out_dict_scenenet = build_partnet(None, key_want = 'scene_instance', reuse_flag = None, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n output_nodes.append(m_scenenet_instance.output)\n ret_params = m_scenenet_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('scannet', 0)==1 and 'image_scannet' in inputs:\n image_scannet = tf.cast(inputs['image_scannet'], tf.float32)\n image_scannet = tf.div(image_scannet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_scannet = tf.subtract(image_scannet, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_scannet = {}\n m_scannet_encode, all_out_dict_scannet = build_partnet(image_scannet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n encode_reuse = True\n\n m_scannet_decode, all_out_dict_scannet = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n decode_reuse = True\n \n m_scannet_depth, all_out_dict_scannet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n depth_reuse = True\n\n all_outputs.extend([m_scannet_depth.output])\n ret_params = m_scannet_depth.params\n\n if cfg_dataset.get('pbrnet', 0)==1 and 'image_pbrnet' in inputs:\n image_pbrnet = tf.cast(inputs['image_pbrnet'], tf.float32)\n image_pbrnet = tf.div(image_pbrnet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_pbrnet = tf.subtract(image_pbrnet, tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_pbrnet = {}\n m_pbrnet_encode, all_out_dict_pbrnet = build_partnet(image_pbrnet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n encode_reuse = True\n\n m_pbrnet_decode, all_out_dict_pbrnet = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n decode_reuse = True\n\n if cfg_dataset.get('pbr_normal', 1)==1:\n if twonormals==0:\n m_pbrnet_normal, all_out_dict_pbrnet = build_partnet(None, key_want = 'normal', reuse_flag = normal_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n normal_reuse = True\n else:\n m_pbrnet_normal, all_out_dict_pbrnet = build_partnet(None, key_want = 'normal_p', reuse_flag = None, reuse_batch = 
None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n\n output_nodes.append(m_pbrnet_normal.output)\n ret_params = m_pbrnet_normal.params\n \n if cfg_dataset.get('pbr_depth', 1)==1:\n m_pbrnet_depth, all_out_dict_pbrnet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n depth_reuse = True\n output_nodes.append(m_pbrnet_depth.output)\n ret_params = m_pbrnet_depth.params\n\n if cfg_dataset.get('pbr_instance', 0)==1:\n m_pbrnet_ins_decode, all_out_dict_pbrnet = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n ins_decode_reuse = True\n m_pbrnet_instance, all_out_dict_pbrnet = build_partnet(None, key_want = 'pbr_instance', reuse_flag = None, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n output_nodes.append(m_pbrnet_instance.output)\n ret_params = m_pbrnet_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('imagenet', 0)==1 and 'image_imagenet' in inputs:\n image_imagenet = tf.cast(inputs['image_imagenet'], tf.float32)\n image_imagenet = tf.div(image_imagenet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_imagenet = tf.subtract(image_imagenet, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_imagenet = {}\n m_imagenet_encode, all_out_dict_imagenet = build_partnet(image_imagenet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n encode_reuse = True\n\n m_imagenet_decode, all_out_dict_imagenet = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n decode_reuse = True\n\n m_imagenet_ins_decode, all_out_dict_imagenet = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n ins_decode_reuse = True\n\n m_imagenet_decode, all_out_dict_imagenet = build_partnet(None, key_want = 'decode_encode', reuse_flag = decode_encode_reuse, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n decode_encode_reuse = True\n \n m_imagenet_category, all_out_dict_imagenet = build_partnet(None, key_want = 'category', reuse_flag = category_reuse, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n category_reuse = True\n\n all_outputs.extend([m_imagenet_category.output])\n ret_params = m_imagenet_category.params\n\n if cfg_dataset.get('coco', 0)==1 and 'image_coco' in inputs:\n\n image_coco = tf.cast(inputs['image_coco'], tf.float32)\n image_coco = tf.div(image_coco, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_coco = tf.subtract(image_coco, tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_coco = {}\n m_coco_encode, all_out_dict_coco = build_partnet(image_coco, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n encode_reuse = True\n\n m_coco_ins_decode, all_out_dict_coco = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n ins_decode_reuse = True\n 
m_coco_instance, all_out_dict_coco = build_partnet(None, key_want = 'coco_instance', reuse_flag = None, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n output_nodes.append(m_coco_instance.output)\n ret_params = m_coco_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('place', 0)==1 and 'image_place' in inputs:\n image_place = tf.cast(inputs['image_place'], tf.float32)\n image_place = tf.div(image_place, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_place = tf.subtract(image_place, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_place = {}\n m_place_encode, all_out_dict_place = build_partnet(image_place, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n encode_reuse = True\n\n m_place_decode, all_out_dict_place = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n decode_reuse = True\n\n m_place_ins_decode, all_out_dict_place = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n ins_decode_reuse = True\n\n m_place_decode, all_out_dict_place = build_partnet(None, key_want = 'decode_encode', reuse_flag = decode_encode_reuse, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n decode_encode_reuse = True\n \n m_place_category, all_out_dict_place = build_partnet(None, key_want = 'place_category', reuse_flag = None, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n\n all_outputs.extend([m_place_category.output])\n ret_params = m_place_category.params\n\n if cfg_dataset.get('nyuv2', 0)==1 and 'image_nyuv2' in inputs:\n image_nyuv2 = tf.cast(inputs['image_nyuv2'], tf.float32)\n image_nyuv2 = tf.div(image_nyuv2, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_nyuv2 = tf.subtract(image_nyuv2, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_nyuv2 = {}\n m_nyuv2_encode, all_out_dict_nyuv2 = build_partnet(image_nyuv2, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n encode_reuse = True\n\n m_nyuv2_decode, all_out_dict_nyuv2 = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n decode_reuse = True\n \n m_nyuv2_depth, all_out_dict_nyuv2 = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n depth_reuse = True\n\n all_outputs.extend([m_nyuv2_depth.output])\n #ret_params = m_nyuv2_depth.params\n\n return all_outputs, ret_params\n\ndef build_datasetnet(\n inputs, \n cfg_initial, \n dataset_prefix, \n all_outputs=[], \n reuse_dict={}, \n center_im=False, \n cfg_dataset={}, \n no_prep=0, \n cache_filter=0, \n extra_feat=0, \n **kwargs):\n ret_params = None\n\n now_input_name = 'image_%s' % dataset_prefix\n\n if cfg_dataset.get(dataset_prefix, 0)==1 and now_input_name in inputs:\n\n image_dataset = tf.cast(inputs[now_input_name], tf.float32)\n if no_prep==0:\n image_dataset = tf.div(image_dataset, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_dataset = tf.subtract(image_dataset, tf.constant(0.5, dtype=tf.float32))\n\n 
output_nodes = []\n all_out_dict_dataset = {}\n dict_cache_filter = {}\n\n curr_order = '%s_order' % dataset_prefix\n assert curr_order in cfg_initial\n network_order = cfg_initial.get(curr_order)\n\n if extra_feat==1 and dataset_prefix in ['imagenet', 'place']:\n # If extra_feat is 1, then depth and normal branch will be added to imagenet and place dataset as outputs\n # Remember to skip them during calculating loss and calculating rep_loss!\n\n add_branch_list = ['depth', 'normal']\n \n # Check whether needed information is there\n for curr_add_branch in add_branch_list:\n assert '%s_order' % curr_add_branch in cfg_initial, 'Model cfg should include %s branch info!' % curr_add_branch\n\n # Work on adding the branches into network order\n for curr_add_branch in add_branch_list:\n add_network_order = cfg_initial.get('%s_order' % curr_add_branch)\n for add_network in add_network_order:\n if add_network not in network_order:\n network_order.append(add_network)\n\n first_flag = True\n\n for network_name in network_order:\n if first_flag:\n input_now = image_dataset\n first_flag = False\n else:\n input_now = None\n\n var_name = getVarName(cfg_initial, key_want = network_name)\n reuse_name = '%s_reuse' % var_name\n reuse_curr = reuse_dict.get(reuse_name, None)\n\n fdb_var_name = getFdbVarName(cfg_initial, key_want = network_name)\n fdb_reuse_name = '_fdb_%s_reuse' % fdb_var_name\n fdb_reuse_curr = reuse_dict.get(fdb_reuse_name, None)\n\n m_curr, all_out_dict_dataset = build_partnet(\n input_now, cfg_initial=cfg_initial, key_want=network_name, reuse_flag=reuse_curr, \n fdb_reuse_flag=fdb_reuse_curr, reuse_batch=None, batch_name='_%s' % network_name, \n all_out_dict=all_out_dict_dataset, cache_filter=cache_filter, \n dict_cache_filter=dict_cache_filter,\n **kwargs)\n\n reuse_dict[reuse_name] = True\n reuse_dict[fdb_reuse_name] = True\n as_output = cfg_initial.get(network_name).get('as_output', 0)\n if as_output==1:\n output_nodes.append(m_curr.output)\n ret_params = m_curr.params\n\n all_outputs.extend(output_nodes)\n\n return all_outputs, reuse_dict, ret_params\n\ndef combine_tfutils_general(inputs, categorymulti = 1, **kwargs):\n\n inputs = input_reshape_mult(inputs, categorymulti = categorymulti)\n\n all_outputs = []\n reuse_dict = {}\n ret_params_final = None\n\n #dataset_prefix_list = ['scenenet', 'pbrnet', 'imagenet', 'coco', 'place']\n dataset_prefix_list = ['scenenet', 'pbrnet', 'imagenet', 'coco', 'place', 'kinetics']\n for dataset_prefix in dataset_prefix_list:\n all_outputs, reuse_dict, ret_params = build_datasetnet(inputs, all_outputs = all_outputs, reuse_dict = reuse_dict, dataset_prefix = dataset_prefix, **kwargs)\n if not ret_params is None:\n ret_params_final = ret_params\n\n all_outputs, reuse_dict, _ = build_datasetnet(inputs, all_outputs = all_outputs, reuse_dict = reuse_dict, dataset_prefix = 'nyuv2', **kwargs)\n\n return all_outputs, ret_params_final\n\ndef combine_normal_tfutils_new(inputs, center_im = False, cfg_dataset = {}, twonormals = 0, categorymulti = 1, no_prep = 0, **kwargs):\n all_outputs = []\n encode_reuse = None\n decode_reuse = None\n normal_reuse = None\n depth_reuse = None\n category_reuse = None\n ins_decode_reuse = None\n ret_params = None\n\n inputs = input_reshape_mult(inputs, categorymulti = categorymulti)\n\n if cfg_dataset.get('scenenet', 0)==1 and 'image_scenenet' in inputs:\n image_scenenet = tf.cast(inputs['image_scenenet'], tf.float32)\n image_scenenet = tf.div(image_scenenet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_scenenet = 
tf.subtract(image_scenenet, tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_scenenet = {}\n m_scenenet_encode, all_out_dict_scenenet = build_partnet(image_scenenet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n encode_reuse = True\n\n m_scenenet_decode, all_out_dict_scenenet = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n decode_reuse = True\n \n if cfg_dataset.get('scene_normal', 1)==1:\n\n if twonormals==0:\n m_scenenet_normal, all_out_dict_scenenet = build_partnet(None, key_want = 'normal', reuse_flag = normal_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n normal_reuse = True\n else:\n m_scenenet_normal, all_out_dict_scenenet = build_partnet(None, key_want = 'normal_s', reuse_flag = None, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n\n output_nodes.append(m_scenenet_normal.output)\n ret_params = m_scenenet_normal.params\n\n if cfg_dataset.get('scene_depth', 1)==1:\n m_scenenet_depth, all_out_dict_scenenet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n depth_reuse = True\n output_nodes.append(m_scenenet_depth.output)\n ret_params = m_scenenet_depth.params\n\n if cfg_dataset.get('scene_instance', 0)==1:\n m_scenenet_ins_decode, all_out_dict_scenenet = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n ins_decode_reuse = True\n m_scenenet_instance, all_out_dict_scenenet = build_partnet(None, key_want = 'scene_instance', reuse_flag = None, reuse_batch = None, batch_name = '_scenenet', all_out_dict = all_out_dict_scenenet, **kwargs)\n output_nodes.append(m_scenenet_instance.output)\n ret_params = m_scenenet_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('scannet', 0)==1 and 'image_scannet' in inputs:\n image_scannet = tf.cast(inputs['image_scannet'], tf.float32)\n image_scannet = tf.div(image_scannet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_scannet = tf.subtract(image_scannet, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_scannet = {}\n m_scannet_encode, all_out_dict_scannet = build_partnet(image_scannet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n encode_reuse = True\n\n m_scannet_decode, all_out_dict_scannet = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n decode_reuse = True\n \n m_scannet_depth, all_out_dict_scannet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_scannet', all_out_dict = all_out_dict_scannet, **kwargs)\n depth_reuse = True\n\n all_outputs.extend([m_scannet_depth.output])\n ret_params = m_scannet_depth.params\n\n if cfg_dataset.get('pbrnet', 0)==1 and 'image_pbrnet' in inputs:\n image_pbrnet = tf.cast(inputs['image_pbrnet'], tf.float32)\n image_pbrnet = tf.div(image_pbrnet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_pbrnet = tf.subtract(image_pbrnet, 
tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_pbrnet = {}\n m_pbrnet_encode, all_out_dict_pbrnet = build_partnet(image_pbrnet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n encode_reuse = True\n\n m_pbrnet_decode, all_out_dict_pbrnet = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n decode_reuse = True\n\n if cfg_dataset.get('pbr_normal', 1)==1:\n if twonormals==0:\n m_pbrnet_normal, all_out_dict_pbrnet = build_partnet(None, key_want = 'normal', reuse_flag = normal_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n normal_reuse = True\n else:\n m_pbrnet_normal, all_out_dict_pbrnet = build_partnet(None, key_want = 'normal_p', reuse_flag = None, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n\n output_nodes.append(m_pbrnet_normal.output)\n ret_params = m_pbrnet_normal.params\n \n if cfg_dataset.get('pbr_depth', 1)==1:\n m_pbrnet_depth, all_out_dict_pbrnet = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n depth_reuse = True\n output_nodes.append(m_pbrnet_depth.output)\n ret_params = m_pbrnet_depth.params\n\n if cfg_dataset.get('pbr_instance', 0)==1:\n m_pbrnet_ins_decode, all_out_dict_pbrnet = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n ins_decode_reuse = True\n m_pbrnet_instance, all_out_dict_pbrnet = build_partnet(None, key_want = 'pbr_instance', reuse_flag = None, reuse_batch = None, batch_name = '_pbrnet', all_out_dict = all_out_dict_pbrnet, **kwargs)\n output_nodes.append(m_pbrnet_instance.output)\n ret_params = m_pbrnet_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('imagenet', 0)==1 and 'image_imagenet' in inputs:\n image_imagenet = tf.cast(inputs['image_imagenet'], tf.float32)\n if no_prep==0:\n image_imagenet = tf.div(image_imagenet, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_imagenet = tf.subtract(image_imagenet, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_imagenet = {}\n m_imagenet_encode, all_out_dict_imagenet = build_partnet(image_imagenet, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n encode_reuse = True\n \n m_imagenet_category, all_out_dict_imagenet = build_partnet(None, key_want = 'category', reuse_flag = category_reuse, reuse_batch = None, batch_name = '_imagenet', all_out_dict = all_out_dict_imagenet, **kwargs)\n category_reuse = True\n\n all_outputs.extend([m_imagenet_category.output])\n ret_params = m_imagenet_category.params\n\n if cfg_dataset.get('coco', 0)==1 and 'image_coco' in inputs:\n\n image_coco = tf.cast(inputs['image_coco'], tf.float32)\n image_coco = tf.div(image_coco, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_coco = tf.subtract(image_coco, tf.constant(0.5, dtype=tf.float32))\n\n output_nodes = []\n\n all_out_dict_coco = {}\n m_coco_encode, all_out_dict_coco = build_partnet(image_coco, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n encode_reuse = True\n\n 
m_coco_ins_decode, all_out_dict_coco = build_partnet(None, key_want = 'ins_decode', reuse_flag = ins_decode_reuse, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n ins_decode_reuse = True\n m_coco_instance, all_out_dict_coco = build_partnet(None, key_want = 'coco_instance', reuse_flag = None, reuse_batch = None, batch_name = '_coco', all_out_dict = all_out_dict_coco, **kwargs)\n output_nodes.append(m_coco_instance.output)\n ret_params = m_coco_instance.params\n\n all_outputs.extend(output_nodes)\n\n if cfg_dataset.get('place', 0)==1 and 'image_place' in inputs:\n image_place = tf.cast(inputs['image_place'], tf.float32)\n image_place = tf.div(image_place, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_place = tf.subtract(image_place, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_place = {}\n m_place_encode, all_out_dict_place = build_partnet(image_place, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n encode_reuse = True\n \n m_place_category, all_out_dict_place = build_partnet(None, key_want = 'place_category', reuse_flag = None, reuse_batch = None, batch_name = '_place', all_out_dict = all_out_dict_place, **kwargs)\n\n all_outputs.extend([m_place_category.output])\n ret_params = m_place_category.params\n\n if cfg_dataset.get('nyuv2', 0)==1 and 'image_nyuv2' in inputs:\n image_nyuv2 = tf.cast(inputs['image_nyuv2'], tf.float32)\n image_nyuv2 = tf.div(image_nyuv2, tf.constant(255, dtype=tf.float32))\n if center_im:\n image_nyuv2 = tf.subtract(image_nyuv2, tf.constant(0.5, dtype=tf.float32))\n\n all_out_dict_nyuv2 = {}\n m_nyuv2_encode, all_out_dict_nyuv2 = build_partnet(image_nyuv2, key_want = 'encode', reuse_flag = encode_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n encode_reuse = True\n\n m_nyuv2_decode, all_out_dict_nyuv2 = build_partnet(None, key_want = 'decode', reuse_flag = decode_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n decode_reuse = True\n \n m_nyuv2_depth, all_out_dict_nyuv2 = build_partnet(None, key_want = 'depth', reuse_flag = depth_reuse, reuse_batch = None, batch_name = '_nyuv2', all_out_dict = all_out_dict_nyuv2, **kwargs)\n depth_reuse = True\n\n all_outputs.extend([m_nyuv2_depth.output])\n #ret_params = m_nyuv2_depth.params\n\n return all_outputs, ret_params\n\ndef split_input(inputs, n_gpus = 1):\n if n_gpus==1:\n return [inputs]\n\n temp_args = {v: tf.split(inputs[v], axis = 0, num_or_size_splits=n_gpus) for v in inputs}\n list_of_args = [{now_arg: temp_args[now_arg][ind] for now_arg in temp_args} for ind in xrange(n_gpus)]\n\n return list_of_args\n\ndef parallel_network_tfutils(inputs, model_func, n_gpus = 1, gpu_offset = 0, **kwargs):\n with tf.variable_scope(tf.get_variable_scope()) as vscope:\n inputs = split_input(inputs, n_gpus)\n\n outputs = []\n params = []\n for i, curr_input in enumerate(inputs):\n with tf.device('/gpu:%d' % (i + gpu_offset)):\n with tf.name_scope('gpu_' + str(i)) as gpu_scope:\n curr_output, curr_param = model_func(curr_input, **kwargs)\n outputs.append(curr_output)\n params.append(curr_param)\n tf.get_variable_scope().reuse_variables()\n\n return outputs, params[0]\n" ]
[ [ "numpy.load", "tensorflow.transpose", "tensorflow.cast", "tensorflow.reshape", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.get_variable_scope", "tensorflow.stack", "tensorflow.device", "tensorflow.split", "tensorflow.contrib.framework.arg_scope" ] ]
xiaoyuehe/TFFinance
[ "8fd737671a0249a184ba7c7418133265abaaf4f7", "8fd737671a0249a184ba7c7418133265abaaf4f7" ]
[ "test/test.py", "src/rnn/SimpleLstmRnn.py" ]
[ "import random\nimport tensorflow as tf\n\nimport numpy as np\n\n\nprint(random.randint(30,300))\n\ns = [float(i) / 100 for i in\n range(32, 53)]\nprint(s)\n\nx = np.array([1, 2, 3, 4])\nprint(x.shape)\n\ny = np.zeros((2, 3, 4))\nprint(y)\na = tf.constant(y)\nb = tf.unstack(a,axis=0)\nc = tf.unstack(a,axis=1)\nd = tf.unstack(a,axis=2)\n\nwith tf.Session() as sess:\n print(sess.run(b))\n print(sess.run(c))\n print(sess.run(d))\n", "# coding=utf-8\n'''\n构造rnn网络基础方法\n'''\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n\nclass RnnConfig(object):\n def __init__(self):\n pass\n\n\nclass SimpleLstmRnn(object):\n def __init__(self, rnn_config):\n self.rnn_config = rnn_config\n self.__build_rnn__()\n\n def __build_rnn__(self):\n batch_size = self.rnn_config.batch_size\n num_steps = self.rnn_config.num_steps\n input_size = self.rnn_config.input_size\n output_size = self.rnn_config.output_size\n lr = self.rnn_config.lr\n layer_nums = self.rnn_config.layer_nums\n\n # 处理输入数据\n self.input_holder = tf.placeholder(tf.float32, [batch_size, num_steps, input_size])\n w_in = tf.Variable(tf.random_normal([input_size, layer_nums[0]]))\n b_in = tf.Variable(tf.random_normal([layer_nums[0], ]))\n input_data = tf.reshape(self.input_holder, [-1, input_size])\n input_rnn = tf.matmul(input_data, w_in) + b_in\n input_rnn = tf.reshape(input_rnn, [-1, num_steps, layer_nums[0]])\n\n # 创建lstm层\n lcl = []\n for i in range(len(layer_nums)):\n size = layer_nums[i]\n cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)\n lcl.append(cell)\n\n cell_layers = tf.nn.rnn_cell.MultiRNNCell(lcl, state_is_tuple=True)\n self.initial_state = cell_layers.zero_state(batch_size, tf.float32)\n\n inner_state = self.initial_state\n outputs = []\n with tf.variable_scope(\"RNN\"):\n for time_step in range(num_steps):\n print(num_steps)\n if time_step > 0:\n tf.get_variable_scope().reuse_variables()\n (cell_output, inner_state) = cell_layers(input_rnn[:, time_step, :], inner_state)\n outputs.append(cell_output)\n self.final_state = inner_state\n\n # 处理输出\n self.output_holder = tf.placeholder(tf.float32, [batch_size, num_steps, output_size])\n output = tf.reshape(outputs, [-1, layer_nums[-1]]) # 作为输出层的输入\n w_out = tf.Variable(tf.random_normal([layer_nums[-1], output_size]))\n b_out = tf.Variable(tf.random_normal([output_size, ]))\n pred = tf.matmul(output, w_out) + b_out\n\n # 损失函数\n self.loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(self.output_holder, [-1])))\n self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)\n\n\n# ——————————————————导入数据——————————————————————\nf = open('601628_2.csv')\ndf = pd.read_csv(f) # 读入股票数据\ndata = np.array(df.loc[:, ['chr', 'exr']]) # 获取最高价序列\nnormalize_data = data[::-1] # 反转,使数据按照日期先后顺序排列\n\n# 生成训练集\n# 设置常量\nrnn_config = RnnConfig()\nrnn_config.batch_size = 100\nrnn_config.num_steps = 25\nrnn_config.input_size = 2\nrnn_config.output_size = 1\nrnn_config.lr = 0.05\nrnn_config.layer_nums = [10, 10]\n\ntime_step = rnn_config.num_steps\nbatch_size = rnn_config.batch_size\n\ntrain_x, train_y = [], [] # 训练集\nfor i in range(len(normalize_data) - time_step - 101):\n x = normalize_data[i:i + time_step]\n y = normalize_data[:, :1][i + 1:i + time_step + 1]\n train_x.append(x.tolist())\n train_y.append(y.tolist())\n\ntest_x, test_y = [], [] # 训练集\nfor i in range(len(normalize_data) - time_step - 101, len(normalize_data) - time_step - 1):\n x = normalize_data[i:i + time_step]\n y = normalize_data[:, :1][i + 1:i + time_step + 1]\n 
test_x.append(x.tolist())\n test_y.append(y.tolist())\n\nrnn = SimpleLstmRnn(rnn_config)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n init_stat = sess.run(rnn.initial_state)\n\n print(init_stat)\n step = 0\n for i in range(10000):\n start = 0\n end = start + batch_size\n while (end < len(train_x)):\n feed_dict = {rnn.input_holder: train_x[start:end], rnn.output_holder: train_y[start:end]}\n for j, (c, h) in enumerate(rnn.initial_state):\n feed_dict[c] = init_stat[j].c\n feed_dict[h] = init_stat[j].h\n\n _,loss_value, init_stat = sess.run([rnn.train_op,rnn.loss, rnn.final_state], feed_dict=feed_dict)\n start += batch_size\n end = start + batch_size\n # 每10步保存一次参数\n if step % 5 == 0:\n print(i, step, loss_value)\n step += 1\n" ]
[ [ "numpy.array", "numpy.zeros", "tensorflow.Session", "tensorflow.constant", "tensorflow.unstack" ], [ "tensorflow.nn.rnn_cell.BasicLSTMCell", "numpy.array", "tensorflow.train.AdamOptimizer", "tensorflow.Session", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.get_variable_scope", "tensorflow.nn.rnn_cell.MultiRNNCell", "pandas.read_csv", "tensorflow.global_variables_initializer", "tensorflow.random_normal" ] ]
KayaDevSolutions/deepgaze
[ "a6d444c70bb75ffcfc23d3b31a0567711fb956a7" ]
[ "deepgaze/color_detection.py" ]
[ "#!/usr/bin/env python\n\n#The MIT License (MIT)\n#Copyright (c) 2016 Massimiliano Patacchiola\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \n#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY \n#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE \n#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport numpy as np\nimport cv2\nimport sys\n\nclass BackProjectionColorDetector:\n \"\"\"Implementation of the Histogram Backprojection algorithm.\n\n The histogram backprojection was proposed by Michael Swain and Dana Ballard \n in their paper \"Indexing via color histograms\".\n Abstract: The color spectrum of multicolored objects provides a a robust, \n efficient cue for indexing into a large database of models. This paper shows \n color histograms to be stable object representations over change in view, and \n demonstrates they can differentiate among a large number of objects. It introduces \n a technique called Histogram Intersection for matching model and image histograms \n and a fast incremental version of Histogram Intersection that allows real-time \n indexing into a large database of stored models using standard vision hardware. \n Color can also be used to search for the location of an object. An algorithm \n called Histogram Backprojection performs this task efficiently in crowded scenes.\n \"\"\"\n\n def __init__(self):\n \"\"\"Init the color detector object.\n\n \"\"\"\n self.template_hsv = None\n\n def setTemplate(self, frame):\n \"\"\"Set the BGR image used as template during the pixel selection\n \n The template can be a spedific region of interest of the main\n frame or a representative color scheme to identify. 
the template\n is internally stored as an HSV image.\n @param frame the template to use in the algorithm\n \"\"\" \n self.template_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n def getTemplate(self):\n \"\"\"Get the BGR image used as template during the pixel selection\n \n The template can be a spedific region of interest of the main\n frame or a representative color scheme to identify.\n \"\"\"\n if(self.template_hsv is None): \n return None\n else:\n return cv2.cvtColor(self.template_hsv, cv2.COLOR_HSV2BGR)\n\n def returnFiltered(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):\n \"\"\"Given an input frame in BGR return the filtered version.\n \n @param frame the original frame (color)\n @param morph_opening it is a erosion followed by dilatation to remove noise\n @param blur to smoth the image it is possible to apply Gaussian Blur\n @param kernel_size is the kernel dimension used for morph and blur\n \"\"\"\n if(self.template_hsv is None): return None\n #Get the mask from the internal function\n frame_threshold = self.returnMask(frame, morph_opening=morph_opening, blur=blur, kernel_size=kernel_size, iterations=iterations)\n #Return the AND image\n return cv2.bitwise_and(frame, frame_threshold)\n\n def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):\n \"\"\"Given an input frame in BGR return the black/white mask.\n \n @param frame the original frame (color)\n @param morph_opening it is a erosion followed by dilatation to remove noise\n @param blur to smoth the image it is possible to apply Gaussian Blur\n @param kernel_size is the kernel dimension used for morph and blur\n \"\"\"\n if(self.template_hsv is None): return None\n #Convert the input framge from BGR -> HSV\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n #Set the template histogram\n template_hist = cv2.calcHist([self.template_hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )\n #Normalize the template histogram and apply backprojection\n cv2.normalize(template_hist, template_hist, 0, 255, cv2.NORM_MINMAX)\n frame_hsv = cv2.calcBackProject([frame_hsv], [0,1], template_hist, [0,180,0,256], 1)\n #Get the kernel and apply a convolution\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size,kernel_size))\n frame_hsv = cv2.filter2D(frame_hsv, -1, kernel)\n #Applying the morph open operation (erosion followed by dilation)\n if(morph_opening==True):\n kernel = np.ones((kernel_size,kernel_size), np.uint8)\n frame_hsv = cv2.morphologyEx(frame_hsv, cv2.MORPH_OPEN, kernel, iterations=iterations)\n #Applying Gaussian Blur\n if(blur==True): \n frame_hsv = cv2.GaussianBlur(frame_hsv, (kernel_size,kernel_size), 0)\n #Get the threshold\n ret, frame_threshold = cv2.threshold(frame_hsv, 50, 255, 0)\n #Merge the threshold matrices\n return cv2.merge((frame_threshold,frame_threshold,frame_threshold))\n\nclass MultiBackProjectionColorDetector:\n \"\"\"Implementation of the Histogram Backprojection algorithm with multi-template.\n\n This class is the reimplementation of the BackProjectionColorDetector class for\n multi-template color detection. Instead of specifing a single template it is \n possible to pass a list of templates, which can be multiple subframe taken from\n different part of an object. Multiple version of the Backprojection algorithm\n are then run at the same time and the filtered output added togheter. 
The result\n of this process is much robust (but slower) than the standard class.\n \"\"\"\n\n def __init__(self):\n \"\"\"Init the color detector object.\n\n \"\"\"\n self.template_hsv_list = list()\n\n def setTemplateList(self, frame_list):\n \"\"\"Set the BGR image list used as container for the templates\n \n The template can be a spedific region of interest of the main\n frame or a representative color scheme to identify. the template\n is internally stored as an HSV image.\n @param frame the template to use in the algorithm\n \"\"\" \n for frame in frame_list: \n self.template_hsv_list.append(cv2.cvtColor(frame, cv2.COLOR_BGR2HSV))\n\n def getTemplateList(self):\n \"\"\"Get the BGR image list used as container for the templates\n \n The template can be a spedific region of interest of the main\n frame or a representative color scheme to identify.\n \"\"\"\n output_list = list()\n for frame in self.template_hsv_list:\n output_list.append(cv2.cvtColor(frame, cv2.COLOR_HSV2BGR))\n return output_list\n\n def returnFiltered(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):\n \"\"\"Given an input frame in BGR return the filtered version.\n \n @param frame the original frame (color)\n @param morph_opening it is a erosion followed by dilatation to remove noise\n @param blur to smoth the image it is possible to apply Gaussian Blur\n @param kernel_size is the kernel dimension used for morph and blur\n \"\"\"\n if(len(self.template_hsv_list) == 0): return None\n #Get the mask from the internal function\n frame_threshold = self.returnMask(frame, morph_opening=morph_opening, blur=blur, kernel_size=kernel_size, iterations=iterations)\n #Return the AND image\n return cv2.bitwise_and(frame, frame_threshold)\n\n def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):\n \"\"\"Given an input frame in BGR return the black/white mask.\n \n @param frame the original frame (color)\n @param morph_opening it is a erosion followed by dilatation to remove noise\n @param blur to smoth the image it is possible to apply Gaussian Blur\n @param kernel_size is the kernel dimension used for morph and blur\n \"\"\"\n if(len(self.template_hsv_list) == 0): return None\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = np.zeros((frame.shape[0], frame.shape[1]))\n for template_hsv in self.template_hsv_list:\n #Set the template histogram\n template_hist = cv2.calcHist([template_hsv],[0, 1], None, [256, 256], [0, 256, 0, 256] )\n #Normalize the template histogram and apply backprojection\n cv2.normalize(template_hist, template_hist, 0, 255, cv2.NORM_MINMAX)\n frame_hsv_back = cv2.calcBackProject([frame_hsv], [0,1], template_hist, [0,256,0,256], 1)\n #Get the kernel and apply a convolution\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size,kernel_size))\n frame_hsv_clean = cv2.filter2D(frame_hsv_back, -1, kernel)\n #Applying the morph open operation (erosion followed by dilation)\n if(morph_opening==True):\n kernel = np.ones((kernel_size,kernel_size), np.uint8)\n frame_hsv_clean = cv2.morphologyEx(frame_hsv_clean, cv2.MORPH_OPEN, kernel, iterations=iterations)\n #Applying Gaussian Blur\n if(blur==True): \n frame_hsv_clean = cv2.GaussianBlur(frame_hsv_clean, (kernel_size,kernel_size), 0)\n #Get the threshold\n ret, frame_hsv_threshold = cv2.threshold(frame_hsv_clean, 50, 255, 0)\n mask = np.add(mask, frame_hsv_threshold) #Add the threshold to the mask\n\n\n #Normalize the mask because it contains\n #values added during the previous loop\n 
#Attention: here it is not necessary to normalize because the astype(np.uint8) method\n #will resize to 255 each value which is higher that that...\n #cv2.normalize(mask, mask, 0, 255, cv2.NORM_MINMAX) #Not necessary\n ret, mask = cv2.threshold(mask.astype(np.uint8), 50, 255, 0)\n return cv2.merge((mask,mask,mask))\n\nclass RangeColorDetector:\n \"\"\"Using this detector it is possible to isolate colors in a specified range.\n\n In this detector the frame given as input is filtered and the pixel which\n fall in a specific range are taken, the other rejected. Some erosion and\n dilatation operation are used in order to remove noise.\n This class use the HSV (Hue, Saturation, Value) color representation to filter pixels.\n The H and S components characterize the color (independent of illumination) \n and V compoenent specifies the illuminations.\n \"\"\"\n\n def __init__(self, min_range, max_range):\n \"\"\"Init the color detector object.\n\n The object must be initialised with an HSV range to use as filter.\n Ex: skin color in channel H is characterized by values between [0, 20], \n in the channel S=[48, 255] and V=[80, 255] (Asian and Caucasian). To\n initialise the vectors in this range it is possible to write: \n min_range = numpy.array([0, 48, 80], dtype = \"uint8\")\n max_range = numpy.array([20, 255, 255], dtype = \"uint8\")\n @param range_min the minimum HSV value to use as filer (numpy.array)\n @param range_max the maximum HSV value to use as filter (numpy.array)\n \"\"\"\n # min and max range to use as filter for the detector (HSV)\n self.min_range = min_range\n self.max_range = max_range\n\n def setRange(self, min_range, max_range):\n \"\"\"Set the min and max range used in the range detector\n \n The skin in channel H is characterized by values between 0 and 50, \n in the channel S from 0.23 to 0.68 (Asian and Caucasian).\n @param range_min the minimum HSV value to use as filer\n @param range_max the maximum HSV value to use as filter\n \"\"\"\n # min and max range to use as filter for the detector (HSV)\n self.min_range = min_range\n self.max_range = max_range\n\n def getRange(self):\n \"\"\"Return the min and max range used in the skin detector\n\n \"\"\"\n return (self.min_range, self.max_range)\n\n\n def returnFiltered(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):\n \"\"\"Given an input frame return the filtered and denoised version.\n \n @param frame the original frame (color)\n @param morph_opening it is a erosion followed by dilatation to remove noise\n @param blur to smoth the image it is possible to apply Gaussian Blur\n @param kernel_size is the kernel dimension used for morph and blur\n @param iterations the number of time erode and dilate are called\n \"\"\"\n frame_filtered = self.returnMask(frame, morph_opening=morph_opening, blur=blur, kernel_size=kernel_size, iterations=iterations)\n #bitwiseAND mask\n frame_denoised = cv2.bitwise_and(frame, frame, mask = frame_filtered)\n return frame_denoised\n\n def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):\n \"\"\"Given an input frame return the black/white mask.\n \n This version of the function does not use the blur and bitwise \n operations, then the resulting frame contains white pixels\n in correspondance of the skin found during the searching process.\n @param frame the original frame (color)\n \"\"\"\n #Convert to HSV and eliminate pixels outside the range\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n frame_filtered = cv2.inRange(frame_hsv, 
self.min_range, self.max_range)\n if(morph_opening==True):\n kernel = np.ones((kernel_size,kernel_size), np.uint8)\n frame_filtered = cv2.morphologyEx(frame_filtered, cv2.MORPH_OPEN, kernel, iterations=iterations)\n #Applying Gaussian Blur\n if(blur==True): \n frame_filtered = cv2.GaussianBlur(frame_filtered, (kernel_size,kernel_size), 0)\n return frame_filtered\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.add" ] ]
sontheimer/ModularScience-Cosim-Template
[ "cc5718217a695b70d8f38c38452f1403706014d3" ]
[ "cosim_example_demos/TVB-NEST-demo/nest_elephant_tvb/Interscale_hub/pivot.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright 2020 Forschungszentrum Jülich GmbH\n# \"Licensed to the Apache Software Foundation (ASF) under one or more contributor\n# license agreements; and to You under the Apache License, Version 2.0. \"\n#\n# Forschungszentrum Jülich\n# Institute: Institute for Advanced Simulation (IAS)\n# Section: Jülich Supercomputing Centre (JSC)\n# Division: High Performance Computing in Neuroscience\n# Laboratory: Simulation Laboratory Neuroscience\n# Team: Multi-scale Simulation and Design\n#\n# ------------------------------------------------------------------------------ \n# \nfrom mpi4py import MPI\nimport time\nimport numpy as np\nimport logging\nimport sys\n\n#nest to tvb\nfrom Interscale_hub.transformer import store_data, analyse_data, spiketorate\n#tvb to nest\nfrom Interscale_hub.transformer import generate_data\n\n\n# NestTvbPivot and TvbNestPivot classes:\n# TODO: proper abstraction -> extract the usecase details from the general implementation\n# -> Init, start, stop are pretty much the same every time\n# -> incoming (receive) and outgoing (send) loops (M:N mapping)\n# -> the analyse (method) should be \n# a) pivot, as raw data to cosim data \n# b) transform (might be trivial) and \n# c) analysis (might be trivial)\n\n# TODO: rework on the receive and send loops (both, general coding style and usecase specifics)\n\nclass NestTvbPivot:\n def __init__(self, intracomm, param, comm_receiver, comm_sender, databuffer):\n '''\n '''\n \n # TODO: logger placeholder for testing\n self.__logger = logging.getLogger(\"NestTvbPivot\")\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n self.__logger.addHandler(handler)\n self.__logger.setLevel(logging.DEBUG)\n \n # Parameter for transformation and analysis\n self.__param = param\n # INTERcommunicator\n if intracomm.Get_rank() == 0:\n self.__comm_receiver = comm_receiver\n self.__num_sending = self.__comm_receiver.Get_remote_size()\n else:\n self.__comm_sender = comm_sender\n self.__num_receiving = self.__comm_sender.Get_remote_size()\n\n # How many Nest ranks are sending, how many Tvb ranks are receiving\n self.__databuffer = databuffer\n \n \n def start(self, intracomm):\n '''\n Start the pivot operation.\n M:N mapping of MPI ranks, receive data, further process data.\n \n MVP: receive on rank 0, do the rest on rank 1.\n '''\n if intracomm.Get_rank() == 0: # Receiver from input sim, rank 0\n self._receive()\n else: # Science/analyse and sender to TVB, rank 1-x\n self._send()\n\n\n def stop(self):\n '''\n TODO: proper execution of stop command\n '''\n self.__stop = True\n\n\n def _receive(self):\n '''\n Receive data on rank 0. Put it into the shared mem buffer.\n Replaces the former 'receive' function.\n NOTE: First refactored version -> not pretty, not final. \n '''\n # The last two buffer entries are used for shared information\n # --> they replace the status_data variable from previous version\n # --> find more elegant solution?\n self.__logger.info(\"setting up buffers\")\n self.__databuffer[-1] = 1 # set buffer to 'ready to receive from nest'\n self.__databuffer[-2] = 0 # marks the 'head' of the buffer\n # It seems the 'check' variable is used to receive tags from NEST, i.e. 
ready for send...\n # change this in the future, also mentioned in the FatEndPoint solution from Wouter.\n check = np.empty(1,dtype='b')\n shape = np.empty(1, dtype='i') \n count = 0\n status_ = MPI.Status()\n self.__logger.info(\"reading from buffer\")\n # self.__logger.info(\"NESTtoTVB -- consumer/receiver -- Rank:\"+str(self.__comm_receiver.Get_rank()))\n while True:\n head_ = 0 # head of the buffer, reset after each iteration \n # TODO: This is still not correct. We only check for the Tag of the last rank.\n # IF all ranks send always the same tag in one iteration (simulation step)\n # then this works. But it should be handled differently!!!!\n self.__comm_receiver.Recv([check, 1, MPI.CXX_BOOL], source=0, tag=MPI.ANY_TAG, status=status_)\n \n status_rank_0 = status_.Get_tag()\n for i in range(1, self.__num_sending):\n # new: We do not care which source sends first, give MPI the freedom to send in whichever order.\n # self.__comm_receiver.Recv([check, 1, MPI.CXX_BOOL], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status_)\n # self.__logger.info(\"checking status\")\n self.__comm_receiver.Recv([check, 1, MPI.CXX_BOOL], source=i, tag=MPI.ANY_TAG, status=status_)\n if status_rank_0 != status_.Get_tag():\n raise Exception('Abnormal state : the state of Nest is different between rank')\n\n if status_.Get_tag() == 0:\n # wait until ready to receive new data (i.e. the sender has cleared the buffer)\n while self.__databuffer[-1] != 1: # TODO: use MPI, remove the sleep\n time.sleep(0.001)\n pass\n for source in range(self.__num_sending):\n # send 'ready' to the nest rank\n # self.__logger.info(\"send ready\")\n self.__comm_receiver.Send([np.array(True,dtype='b'),MPI.BOOL],dest=source,tag=0)\n # receive package size info\n # self.__logger.info(\"DEBUG 121 ====> receiving size in NEST_TVB_PIVOT\")\n self.__comm_receiver.Recv([shape, 1, MPI.INT], source=source, tag=0, status=status_)\n # self.__comm_receiver.Recv([shape, 1, MPI.INT], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status_)\n # NEW: receive directly into the buffer\n self.__comm_receiver.Recv([self.__databuffer[head_:], MPI.DOUBLE], source=source, tag=0, status=status_)\n head_ += shape[0] # move head \n # Mark as 'ready to do analysis'\n self.__databuffer[-1] = 0\n # important: head_ is first buffer index WITHOUT data.\n self.__databuffer[-2] = head_\n elif status_.Get_tag() == 1:\n count += 1\n elif status_.Get_tag() == 2:\n # NOTE: simulation ended\n break\n else:\n raise Exception(\"bad mpi tag\"+str(status_.Get_tag()))\n \n \n def _send(self):\n '''\n Send data to TVB (multiple MPI ranks possible).\n Replaces the former 'send' function.\n NOTE: First refactored version -> not pretty, not final. \n '''\n count=0 # simulation/iteration step\n status_ = MPI.Status()\n # self.__logger.info(\"NESTtoTVB -- producer/sender -- Rank:\"+str(self.__comm_sender.Get_rank()))\n while True:\n # TODO: this communication has the 'rank 0' problem described in the beginning\n accept = False\n #logger.info(\"Nest to TVB : wait to send \" )\n while not accept:\n req = self.__comm_sender.irecv(source=MPI.ANY_SOURCE,tag=MPI.ANY_TAG)\n accept = req.wait(status_)\n #logger.info(\" Nest to TVB : send data status : \" +str(status_.Get_tag()))\n if status_.Get_tag() == 0:\n # wait until the receiver has cleared the buffer, i.e. filled with new data\n while self.__databuffer[-1] != 0: # TODO: use MPI, remove the sleep\n time.sleep(0.001)\n pass\n # TODO: All science/analysis here. 
Move to a proper place.\n times,data = self._transform(count)\n # Mark as 'ready to receive next simulation step'\n self.__databuffer[-1] = 1\n \n ### OLD Code\n #logger.info(\"Nest to TVB : send data :\"+str(np.sum(data)) )\n # time of sim step\n self.__comm_sender.Send([times, MPI.DOUBLE], dest=status_.Get_source(), tag=0)\n # send the size of the rate\n size = np.array(int(data.shape[0]),dtype='i')\n self.__comm_sender.Send([size,MPI.INT], dest=status_.Get_source(), tag=0)\n # send the rates\n self.__comm_sender.Send([data,MPI.DOUBLE], dest=status_.Get_source(), tag=0)\n ### OLD Code end\n elif status_.Get_tag() == 1:\n # NOTE: simulation ended\n break\n else:\n raise Exception(\"bad mpi tag\"+str(status_.Get_tag()))\n count+=1\n\n \n def _transform(self, count):\n '''\n This step contains some pivoting, transformation and analysis.\n TODO: encapsulate\n :param count: Simulation iteration/step\n :return times, data: simulation times and the calculated rates\n '''\n #store: Python object, create the histogram \n #analyse: Python object, calculate rates\n spikerate = spiketorate(self.__param)\n times, data = spikerate.spike_to_rate(count, self.__databuffer[-2], self.__databuffer)\n\n '''\n store = store_data(self.__param)\n analyse = analyse_data(self.__param)\n \n # TODO: Step 1 and 2 can be merged into one step. Buffer is no longer filled rank by rank.\n # Make this parallel with the INTRA communicator (should be embarrassingly parallel).\n # Step 1) take all data from buffer and create histogram\n # second to last index in databuffer denotes how much data there is\n self.__logger.info(\"NESTtoTVBPivot -- transform -- buffer head:\"+str(self.__databuffer[-2]))\n store.add_spikes(count, self.__databuffer[:int(self.__databuffer[-2])])\n # Step 2) take the resulting histogram\n data_to_analyse = store.return_data()\n # Step 3) Analyse this data, i.e. calculate rates?\n times,data = analyse.analyse(count, data_to_analyse)\n '''\n return times, data\n \n\n\nclass TvbNestPivot: \n def __init__(self, intracomm, param, comm_receiver, comm_sender, databuffer):\n '''\n '''\n \n # TODO: logger placeholder for testing\n self.__logger = logging.getLogger(\"TvbNestPivot\")\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n self.__logger.addHandler(handler)\n self.__logger.setLevel(logging.DEBUG)\n self.__logger.info(\"Initialise...\")\n \n # Parameter for transformation and analysis\n self.__param = param\n # INTERcommunicator\n if intracomm.Get_rank() == 1:\n self.__comm_receiver = comm_receiver\n self.__num_sending = self.__comm_receiver.Get_remote_size()\n else: \n self.__comm_sender = comm_sender\n self.__num_receiving = self.__comm_sender.Get_remote_size()\n # How many TVB ranks are sending, how many NEST ranks are receiving\n self.__databuffer = databuffer\n\n\n def start(self, intracomm):\n '''\n Start the pivot operation.\n M:N mapping of MPI ranks, receive data, further process data.\n \n MVP: receive on rank 0, do the rest on rank 1.\n '''\n if intracomm.Get_rank() == 0: # Receiver from input sim, rank 0\n self._send()\n else: # Science/analyse and sender to TVB, rank 1-x\n self._receive()\n\n\n def stop(self):\n '''\n TODO: proper execution of stop command\n '''\n self.__stop = True\n\n\n def _receive(self):\n '''\n Receive data on rank 0. Put it into the shared mem buffer.\n Replaces the former 'receive' function.\n NOTE: First refactored version -> not pretty, not final. 
\n '''\n # The last two buffer entries are used for shared information\n # --> they replace the status_data variable from previous version\n # --> find more elegant solution?\n self.__databuffer[-1] = 1 # set buffer to 'ready to receive from tvb'\n self.__databuffer[-2] = 0 # marks the 'head' of the buffer\n # init placeholder for incoming data\n size = np.empty(1, dtype='i') # size of the rate-array\n status_ = MPI.Status()\n # self.__logger.info(\"TVBtoNEST -- consumer/receiver -- Rank:\"+str(self.__comm_receiver.Get_rank()))\n while True:\n # NOTE: Check communication protocol between simulators and transformers!\n requests=[]\n for rank in range(self.__num_sending):\n requests.append(self.__comm_receiver.isend(True,dest=rank,tag=0))\n MPI.Request.Waitall(requests)\n # NOTE: works for now, needs rework if multiple ranks are used on TVB side\n # we receive from \"ANY_SOURCE\", but only check the status_ of the last receive...\n # get the starting and ending time of the simulation step\n # NEW: receive directly into the buffer\n self.__comm_receiver.Recv([self.__databuffer[0:], MPI.DOUBLE], source=0, tag=MPI.ANY_TAG, status=status_)\n if status_.Get_tag() == 0:\n # wait until ready to receive new data (i.e. the sender has cleared the buffer)\n while self.__databuffer[-1] != 1: # TODO: use MPI, remove the sleep\n time.sleep(0.001)\n pass\n # Get the size of the data\n self.__comm_receiver.Recv([size, 1, MPI.INT], source=status_.Get_source(), tag=0, status=status_)\n # NEW: receive directly into the buffer\n # First two entries are the times, see above\n self.__comm_receiver.Recv([self.__databuffer[2:], MPI.DOUBLE], source=status_.Get_source(), tag=0, status=status_)\n # Mark as 'ready to do analysis'\n self.__databuffer[-1] = 0\n self.__databuffer[-2] = size # info about size of data array\n elif status_.Get_tag() == 1:\n # NOTE: simulation ended\n break\n else:\n raise Exception(\"bad mpi tag\"+str(status_.Get_tag()))\n \n # logger.info('TVB_to_NEST: End of receive function')\n\n\n def _send(self):\n '''\n Send data to NEST (multiple MPI ranks possible).\n Replaces the former 'send' function.\n NOTE: First refactored version -> not pretty, not final. \n '''\n status_ = MPI.Status()\n # NOTE: hardcoded...\n check = np.empty(1,dtype='b')\n size_list = np.empty(1, dtype='i')\n id_first_spike_detector = self.__param['id_first_spike_detector']\n while True:\n # TODO: This is still not correct. We only check for the Tag of the last rank.\n # IF all ranks send always the same tag in one iteration (simulation step)\n # then this works. But it should be handled differently!!!!\n for rank in range(self.__num_receiving):\n self.__comm_sender.Recv([check, 1, MPI.CXX_BOOL], source=rank, tag=MPI.ANY_TAG, status=status_)\n if status_.Get_tag() == 0:\n # wait until the receiver has cleared the buffer, i.e. filled with new data\n while self.__databuffer[-1] != 0: # TODO: use MPI, remove the sleep\n time.sleep(0.001)\n pass\n\n # TODO: All science/generate here. 
Move to a proper place.\n spikes_times = self._transform()\n # Mark as 'ready to receive next simulation step'\n self.__databuffer[-1] = 1\n \n ### OLD code, kept the communication and science as it is for now\n # NOTE: Receive from status_.Get_source() and rank\n # Send to status_.Get_source() and rank\n # why?\n # a second status_ object is used, should not be named the same\n for rank in range(self.__num_receiving):\n # NOTE: hardcoded 10 in simulation mocks\n self.__comm_sender.Recv([size_list, 1, MPI.INT], source=rank, tag=0, status=status_)\n if size_list[0] != 0:\n list_id = np.empty(size_list, dtype='i')\n # NOTE: hardcoded np.arange(0,10,1) in simulation mocks\n self.__comm_sender.Recv([list_id, size_list, MPI.INT], source=status_.Get_source(), tag=0, status=status_)\n # Select the good spike train and send it\n # TODO: create lists, append to lists, nested loops\n # this is slow and will be a bottleneck when we scale up.\n data = []\n shape = []\n for i in list_id:\n shape += [spikes_times[i-id_first_spike_detector].shape[0]]\n data += [spikes_times[i-id_first_spike_detector]]\n send_shape = np.array(np.concatenate(([np.sum(shape)],shape)), dtype='i')\n # firstly send the size of the spikes train\n # self.__logger.info(\"sending size of train\")\n self.__comm_sender.Send([send_shape, MPI.INT], dest=status_.Get_source(), tag=list_id[0])\n # secondly send the spikes train\n data = np.concatenate(data).astype('d')\n # self.__logger.info(\"sending train\")\n self.__comm_sender.Send([data, MPI.DOUBLE], dest=rank, tag=list_id[0])\n ### OLD code end\n elif status_.Get_tag() == 1:\n # NOTE: one sim step? inconsistent with receiving side\n continue\n elif status_.Get_tag() == 2:\n # NOTE: simulation ended\n break\n else:\n raise Exception(\"bad mpi tag : \"+str(status_.Get_tag()))\n \n\n def _transform(self):\n '''\n This step contains some pivoting, transformation and analysis.\n TODO: encapsulate\n '''\n generator = generate_data(self.__param)\n # NOTE: count is a hardcoded '0'. Why?\n # time_step are the first two doubles in the buffer\n # rate is a double array, which size is stored in the second to last index\n if int(self.__databuffer[-2]) == 0:\n spikes_times = generator.generate_spike(0,\n self.__databuffer[:2],\n self.__databuffer[2:])\n else:\n spikes_times = generator.generate_spike(0,\n self.__databuffer[:2],\n self.__databuffer[2:int(self.__databuffer[-2])])\n return spikes_times\n" ]
[ [ "numpy.concatenate", "numpy.sum", "numpy.array", "numpy.empty" ] ]
KushanChamindu/Capsules-for-text
[ "03643d47e592d2034b5e72ddee769a4edae7137d" ]
[ "trainer.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras import utils\n\n# import ensemble_capsule_network\nimport ensemble_capsule_network\nimport network_test\nfrom config import Config\n# import network\nfrom preprocessing import text_preprocessing, load_word_embedding_matrix\n\nfolder_path = \"D:\\\\deep_learning_experiments\"\nlankadeepa_data_path = folder_path + \"\\\\sinhala_data\\\\lankadeepa_tagged_comments.csv\"\ngossip_lanka_data_path = folder_path + \"\\\\sinhala_data\\\\gossip_lanka_tagged_comments.csv\"\n\nword_embedding_keyed_vectors_path = 'D:\\\\deep_learning_experiments\\\\word_vectors_sinhala\\\\keyed.kv'\nword_embedding_matrix_path = 'D:\\\\deep_learning_experiments\\\\word_embedding_matrix'\nEMBEDDING_SIZE = 300\n\nlankadeepa_data = pd.read_csv(lankadeepa_data_path)[:9059]\ngossipLanka_data = pd.read_csv(gossip_lanka_data_path)\ngossipLanka_data = gossipLanka_data.drop(columns=['Unnamed: 3'])\n\nword_embedding_path = folder_path\n\nall_data = pd.concat([lankadeepa_data, gossipLanka_data], ignore_index=True)\nall_data['label'] = all_data['label'] - 2\nprint(all_data)\n\ncomments_text, labels = text_preprocessing(all_data)\nt = Tokenizer()\nt.fit_on_texts(comments_text)\nvocab_size = len(t.word_index) + 1\nprint(vocab_size)\n\nencoded_docs = t.texts_to_sequences(comments_text)\n# for i in encoded_docs:\n# print(len(i))\n# zzz = lambda z: len(z)\nlengths = list(map(lambda z: len(z), encoded_docs))\nprint('###########################################')\nprint(lengths)\n\nmax_length = max(lengths)\npadded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\ncomment_labels = np.array(labels)\ncomment_labels = utils.to_categorical(comment_labels)\npadded_docs = np.array(padded_docs)\n\nprint(\"Shape of all comments: \", padded_docs.shape)\nprint(\"Shape of labels: \", comment_labels.shape)\n\nX_train, X_test, y_train, y_test = train_test_split(padded_docs, comment_labels, test_size=0.1, random_state=42,\n shuffle=True)\nprint(\"Train lables shape: \", y_train.shape)\n\n# generate embedding matrix\n# embedding_matrix = generate_embedding_matrix(word_embedding_keyed_vectors_path, word_embedding_matrix_path, vocab_size,\n# EMBEDDING_SIZE, t)\n\n# load embedding matrix\nembedding_matrix = load_word_embedding_matrix(word_embedding_matrix_path)\n\n# print(embedding_matrix[1])\nconfig = Config(\n seq_len=max_length,\n num_classes=4,\n vocab_size=vocab_size,\n embedding_size=EMBEDDING_SIZE,\n dropout_rate=0.8,\n x_train=X_train,\n y_train=y_train,\n x_test=X_test,\n y_test=y_test,\n pretrain_vec=embedding_matrix)\n\nmodel = ensemble_capsule_network.ensemble_capsule_network(config)\n# model = network_test.get_model_from_text_layer(config)\nmodel.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=100)\n" ]
[ [ "tensorflow.keras.preprocessing.sequence.pad_sequences", "tensorflow.keras.utils.to_categorical", "numpy.array", "pandas.concat", "tensorflow.keras.preprocessing.text.Tokenizer", "sklearn.model_selection.train_test_split", "pandas.read_csv" ] ]
ab3llini/ASLRecognizer
[ "9a98887b13b73bb81bd4d6d8ebbfb13c4ef7e856" ]
[ "src/plots/Plot.py" ]
[ "import seaborn as sb\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as cols\n\n\n\"\"\"Plots a two lines graph.\n@:param x is a list of the values on the x-axis for both lines.\n@:param y1 is a list with the corresponding y-values for one line\n@:param y2 is a list with the corresponding y-values for the second line\n@:param l1 is the label associated to y1\n@:param l2 is the label associated to y2\n@:param title is the title of the graph\"\"\"\ndef line2(x, y1, y2, l1, l2, title, xlab, ylab):\n sb.lineplot(x=x, y=y1, color=\"red\", legend='full', label=l1)\n ax = sb.lineplot(x=x, y=y2, color=\"blue\", legend='full', label=l2)\n ax.set(xlabel=xlab, ylabel=ylab)\n plt.title(title)\n plt.show()\n\n\n\"\"\"Plots a 1-line graph.\n@:param x is a list of the values on the x-axis for both lines.\n@:param y1 is a list with the corresponding y-values\n@:param l1 is the label associated to y1\n@:param title is the title of the graph\"\"\"\ndef line(x, y1, l1, title):\n ax = sb.lineplot(x=x, y=y1, color=\"blue\", legend='full', label=l1)\n ax.set(xlabel=\"#training samples\", ylabel=\"accuracy\")\n plt.title(title)\n plt.show()\n\n\ndef meshgrid(x, y, z, cmap, ticks):\n plt.pcolor(x, y, z, cmap=cmap, alpha=0.2)\n plt.colorbar(ticks=ticks)\n plt.clim(min(ticks), max(ticks))\n\n\ndef scatter(x, y, classes, colors, annotate=False):\n ax = sb.scatterplot(x, y, marker=\"o\", hue=classes, legend=False, palette=colors, edgecolor=\"black\")\n plt.rcParams[\"lines.markeredgewidth\"] = 4\n if annotate:\n for i, txt in enumerate(classes):\n ax.annotate(txt, (x[i], y[i]))\n\n\ndef show():\n plt.show()\n\n\ndef colormap():\n return cols.ListedColormap(['yellow', 'blue', 'red']), [1, 2, 3]\n\n\n\n" ]
[ [ "matplotlib.pyplot.colorbar", "matplotlib.pyplot.pcolor", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.colors.ListedColormap" ] ]
dblalock/sprintz
[ "a056cdb67d049669875ab5487359aca99ae873ea" ]
[ "python/test_scratch.py" ]
[ "#!/usr/bin/env python\n\n# import itertools\nimport numpy as np\n\nfrom .scratch1 import my_old_transform, my_transform_inverse\n\n\ndef test_my_old_transform_nonincreasing_abs():\n x = np.zeros((1, 2), dtype=np.int32)\n\n # idxs = np.arange(256)\n # left_col = np.tile(idxs, 256).reshape((256, 256)).T.reshape((-1))\n # idx_pairs = np.array(itertools.product(idxs, idxs))\n\n # for i in xrange(256):\n for i in range(-128, 128):\n abs_i = np.abs(i)\n x[0, 0] = i\n # for j in xrange(256):\n for j in range(-128, 128):\n orig_abs = max(abs_i, np.abs(j))\n x[0, 1] = j\n\n x_enc = my_old_transform(x)\n new_abs = np.max(np.abs(x_enc))\n\n assert new_abs <= orig_abs\n\n\ndef test_my_transform_inverse():\n print(\"running inverse transform test\")\n x = np.zeros((1, 2), dtype=np.int32)\n min_val = -128\n # min_val = 0\n max_val = 127\n\n for i in range(min_val, max_val + 1):\n x[0, 0] = i\n for j in range(min_val, max_val + 1):\n x[0, 1] = j\n x_enc = my_linear_transform(x)\n x_hat = my_transform_inverse(x_enc)\n\n eq = np.array_equal(x, x_hat)\n if not eq:\n print(\"failing x, x_hat\", x, x_hat)\n assert eq\n\n\ndef encode_decode(a, b):\n # ya, this one is just not a bijection; we lose b's LSB\n\n print(\"------------------------\")\n\n beta = (a >> 1) + (b >> 1)\n alpha = a - beta\n\n ahat = alpha + beta\n tmp = (beta - ahat >> 1)\n bhat = tmp << 1\n print(\"tmp, orig bhat\", tmp, bhat)\n bhat -= (bhat >> 1) > tmp\n\n print(\"a, b: \", a, b)\n print(\"alpha, beta: \", alpha, beta)\n print(\"ahat, bhat: \", ahat, bhat)\n\n assert a == ahat\n assert b == bhat\n\n\ndef encode_decode2(a, b):\n print(\"------------------------\")\n\n beta = b - (a >> 1)\n alpha = a - (beta >> 1)\n # beta = (a >> 1) + (b >> 1)\n # alpha = a - beta\n\n ahat = alpha + (beta >> 1)\n bhat = beta + (a >> 1)\n\n print(\"a, b: \", a, b)\n print(\"alpha, beta: \", alpha, beta)\n print(\"ahat, bhat: \", ahat, bhat)\n\n assert a == ahat\n assert b == bhat\n\n\ndef main():\n # a = 4\n # b = 6\n encode_decode2(4, 6)\n encode_decode2(6, 2)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.abs", "numpy.array_equal", "numpy.zeros" ] ]
danielnflam/GAN-Tests
[ "f112e27b802d717f64a8f2cfa79b9898667da14c" ]
[ "blocks.py" ]
[ "# IMPLEMENT THE RESNET\nimport torch\nfrom torch import Tensor\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\nimport torchvision.transforms as vtransforms\nfrom typing import Type, Any, Callable, Union, List, Optional\nimport os, random, sys, time, pathlib\nfrom operator import add\n\ndef Identity(x):\n return x\n\nclass MiniBatchDiscrimination(nn.Module):\n \"\"\"\n source: https://gist.github.com/t-ae/732f78671643de97bbe2c46519972491\n paper: Salimans et al. 2016. Improved Methods for Training GANs\n \"\"\"\n def __init__(self, in_features, out_features, kernel_dims, mean=False):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.kernel_dims = kernel_dims\n self.mean = mean\n self.T = nn.Parameter(torch.Tensor(in_features, out_features, kernel_dims), requires_grad=True) # Tensor that is to be considered a module parameter.\n init.normal_(self.T, 0, 1)\n\n def forward(self, x):\n # x is NxA\n # T is AxBxC\n matrices = x.mm(self.T.view(self.in_features, -1))\n matrices = matrices.view(-1, self.out_features, self.kernel_dims)\n\n M = matrices.unsqueeze(0) # 1xNxBxC\n M_T = M.permute(1, 0, 2, 3) # Nx1xBxC\n norm = torch.abs(M - M_T).sum(3) # NxNxB\n expnorm = torch.exp(-norm)\n o_b = (expnorm.sum(0) - 1) # NxB, subtract self distance\n if self.mean:\n o_b /= x.size(0) - 1\n\n x = torch.cat([x, o_b], 1) # concatenate output with the input features\n return x\n\nclass ImageBuffer():\n \"\"\"\n Attain a significant performance improvement by using a saved buffer of previously generated images.\n 1) Take k-generated samples from the mini-batch in the i'th generation.\n 2) Randomly shuffle data in the buffer\n 3) Pop and concatenate the batch with the buffer sample\n \n \"\"\"\n def __init__(self, pool_size, buffer_out_rate=0.5):\n \"\"\"\n Inputs:\n pool_size, buffer_out_rate=0.5\n \"\"\"\n self.poolSize = pool_size\n if self.poolSize > 0: # create an empty pool\n self.numImagesInPoolCurrent = 0\n self.buffer_out_rate = buffer_out_rate\n self.images = []\n def query(self, images):\n \"\"\"\n Outputs an image from the image buffer by chance (self.buffer_out_rate).\n Inputs:\n images: a [N x C x H x W] torch tensor.\n \"\"\"\n \n # If image pool is size 0, identity function\n if self.poolSize == 0:\n return images\n \n # Else: build a list of tensors for the buffer\n return_images = []\n for image in images:\n # image.shape has [C x H x W]\n image = torch.unsqueeze(image.data, 0) # image.data is assumed to have dimensions [C x H x W]\n if self.numImagesInPoolCurrent < self.poolSize: # if the buffer is not full; keep inserting current images to the buffer\n self.numImagesInPoolCurrent += 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p < self.buffer_out_rate: \n # if buffer_out_rate = 0.8, 80% of the time the input image is swapped for a buffer-held image\n random_id = random.randint(0, self.poolSize - 1) # randint is inclusive\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else:\n return_images.append(image)\n return_images = torch.cat(return_images, 0) # collect all the images and return\n return return_images\n \n\n \n\nclass ADL(nn.Module):\n \"\"\"\n From the work done by Choe et al. 2019.\n Inputs for initialisation: \n drop_rate: the proportion at which the drop mask is selected instead of the importance map. Default: 0.75 (i.e. 
use the drop mask 75% of the time).\n gamma: the ratio of maximum intensity of the self-attention map, at which the threshold is set to determine the importance map/drop mask. Default: 0.9 (should be set depending on network)\n \n Source:\n Choe, J., & Shim, H. (2019). Attention-Based Dropout Layer for Weakly Supervised Object Localization. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2219–2228.\n Source: https://github.com/clovaai/wsolevaluation\n \"\"\"\n def __init__(self, drop_rate=0.75, gamma=0.9):\n super().__init__()\n self.drop_rate = drop_rate\n self.gamma = gamma\n \n def forward(self, x: Tensor) -> Tensor:\n \"\"\"\n x has units NxCxHxW\n \"\"\"\n # Evaluation mode:\n if not self.training:\n return x\n # Training mode:\n attention_map = torch.mean(x, dim=1, keepdim=True)\n drop_mask = self.calculate_drop_mask(attention_map)\n importance_map = torch.sigmoid(attention_map)\n selected_map = self.select_map(drop_mask, importance_map)\n \n return torch.mul(selected_map, x)\n \n def select_map(self, drop_mask, importance_map) -> Tensor:\n randNumber = torch.rand([], dtype=torch.float32) + self.gamma\n binaryNum = randNumber.floor()\n return (1.-binaryNum)*importance_map + binaryNum*drop_mask\n \n def calculate_drop_mask(self, x: Tensor) -> Tensor:\n batch_size = x.size(0)\n maxAtt, _ = torch.max(x.view(batch_size,-1), dim=1, keepdim=True) # maxAtt calculated for each batch individually.\n threshold = self.gamma * maxAtt\n threshold = threshold.view(batch_size,1,1,1) # reshape into NxCxHxW\n drop_mask = (x < threshold).float()\n return drop_mask\n \n def extra_repr(self):\n \"\"\"Information Function\"\"\"\n return \"ADL Drop Rate={}, ADL Gamma={}\".format(\n self.drop_rate, self.gamma)\n\nclass UpsampleConvolution(nn.Module):\n \"\"\"\n Instead of using ConvTranspose2d, which may lead to weird checkerboard artifacts, use a separate upsample and conv2d 1x1 operation.\n Here is a generic combination of the two.\n \"\"\"\n def __init__(self, in_channels, out_channels, upsample_size=None, upsample_scale_factor=None, upsample_mode=\"nearest\", upsample_align_corners=False,\n kernel_size=3, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):\n super().__init__()\n # upsample\n if upsample_size is not None:\n if kernel_size > 1:\n a = upsample_size\n b = (2*(kernel_size//2) , 2*(kernel_size//2))\n self.size=list(map(add, a, b))\n else:\n self.size = None\n self.scale_factor=upsample_scale_factor\n self.mode=upsample_mode\n self.align_corners=upsample_align_corners\n # conv\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation=dilation\n self.groups=groups\n self.bias=bias\n self.padding_mode=padding_mode\n \n # blocks\n if self.mode == \"nearest\":\n #print(\"Upsampling Mode: \" + str(self.mode))\n self.align_corners = None\n \n self.upsample = nn.Upsample(size=self.size, scale_factor = self.scale_factor, mode=self.mode, align_corners=self.align_corners)\n self.conv = nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, self.dilation, self.groups, self.bias, self.padding_mode)\n def forward(self, x: Tensor) -> Tensor:\n out = self.upsample(x)\n out = self.conv(out)\n return out\n\n######################################\n# Diakogiannis ResUNet-A\n# Not in use\n######################################\n \nclass PSPPooling_miniBlock(nn.Module):\n \"\"\"\n PSP Pooling miniBlock.\n 
\n Source of architecture:\n Diakogiannis, F. I., Waldner, F., Caccetta, P., & Wu, C. (2020). ResUNet-a: A deep learning framework for semantic segmentation of remotely sensed data. \n ISPRS Journal of Photogrammetry and Remote Sensing, 162, 94–114. https://doi.org/10.1016/j.isprsjprs.2020.01.013\n \n Source of original paper:\n Zhao, H., Shi, J., Qi, X., Wang, X., & Jia, J. (2017). Pyramid Scene Parsing Network. \n 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 6230–6239. https://doi.org/10.1109/CVPR.2017.660\n \"\"\"\n def __init__(self, _in_channels, _output_size, _kernel_size, _stride, _padding, _dilation, _pyramid_levels):\n super().__init__()\n \n self.in_channels = _in_channels\n self.kernel_size = _kernel_size\n self.stride = _stride\n self.padding = _padding\n self.dilation = _dilation\n self.output_size = _output_size\n \n self.maxPool = nn.MaxPool2d(kernel_size=self.kernel_size, stride=self.stride,padding=self.padding, dilation=self.dilation)\n self.upSample = nn.Upsample(size=self.output_size, mode='bilinear', align_corners=None)\n self.dimensionalReduction = Conv2DN(_in_channels = self.in_channels, _out_channels = self.in_channels//_pyramid_levels, _kernel_size=(1,1), _stride=(1, 1), _padding=(0,0), _dilation_rate=(1,1), _norm_type='BatchNorm')\n \n def forward(self, x: Tensor) -> Tensor:\n out = self.maxPool(x)\n out = self.upSample(out)\n out = self.dimensionalReduction(out)\n return out\n\nclass PSPPooling(nn.Module):\n \"\"\"\n PSP Pooling.\n On forward step:\n INPUT and OUTPUT tensors are the same size.\n \n \n INPUT when initialising class:\n _tensor_array_shape : (N, C, H, W)\n \n Source of architecture:\n Diakogiannis, F. I., Waldner, F., Caccetta, P., & Wu, C. (2020). ResUNet-a: A deep learning framework for semantic segmentation of remotely sensed data. \n ISPRS Journal of Photogrammetry and Remote Sensing, 162, 94–114. https://doi.org/10.1016/j.isprsjprs.2020.01.013\n \n Source of original paper:\n Zhao, H., Shi, J., Qi, X., Wang, X., & Jia, J. (2017). Pyramid Scene Parsing Network. \n 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 6230–6239. 
https://doi.org/10.1109/CVPR.2017.660\n \n \n \"\"\"\n def __init__(self, _tensor_array_shape):\n super().__init__()\n \n self.in_channels = _tensor_array_shape[1]\n self.output_size = (_tensor_array_shape[2] , _tensor_array_shape[3])\n self.pyramid_levels = 4\n \n self.miniBlock_out_channels = (self.in_channels//self.pyramid_levels)\n self.concat_channels = self.miniBlock_out_channels*self.pyramid_levels\n \n kernel_sizes = ((_tensor_array_shape[2] , _tensor_array_shape[3]) ,\n (_tensor_array_shape[2]//2 , _tensor_array_shape[3]//2),\n (_tensor_array_shape[2]//4 , _tensor_array_shape[3]//4),\n (_tensor_array_shape[2]//8 , _tensor_array_shape[3]//8))\n \n strides=((1,1),\n kernel_sizes[1],\n kernel_sizes[2],\n kernel_sizes[3])\n \n paddings=((0),(0),(0),(0))\n dilations=((1),(1),(1),(1))\n \n self.pooling_1 = PSPPooling_miniBlock(_in_channels=self.in_channels, _output_size=self.output_size, _kernel_size=kernel_sizes[0], _stride=strides[0], _padding=paddings[0], _dilation=dilations[0], _pyramid_levels=self.pyramid_levels)\n self.pooling_2 = PSPPooling_miniBlock(_in_channels=self.in_channels, _output_size=self.output_size, _kernel_size=kernel_sizes[1], _stride=strides[1], _padding=paddings[1], _dilation=dilations[1], _pyramid_levels=self.pyramid_levels)\n self.pooling_3 = PSPPooling_miniBlock(_in_channels=self.in_channels, _output_size=self.output_size, _kernel_size=kernel_sizes[2], _stride=strides[2], _padding=paddings[2], _dilation=dilations[2], _pyramid_levels=self.pyramid_levels)\n self.pooling_4 = PSPPooling_miniBlock(_in_channels=self.in_channels, _output_size=self.output_size, _kernel_size=kernel_sizes[3], _stride=strides[3], _padding=paddings[3], _dilation=dilations[3], _pyramid_levels=self.pyramid_levels)\n \n self.finalConv2DN = Conv2DN(_in_channels = self.concat_channels, _out_channels = self.in_channels, _kernel_size=(1,1), _stride=(1, 1), _padding=(0,0), _dilation_rate=(1,1), _norm_type='BatchNorm')\n \n def forward(self, x: Tensor) -> Tensor:\n \n out1 = self.pooling_1(x)\n out2 = self.pooling_2(x)\n out3 = self.pooling_3(x)\n out4 = self.pooling_4(x)\n \n # concat\n out = torch.cat((out1,out2,out3,out4),dim=1)\n out = self.finalConv2DN(out)\n return out\n \n \nclass ResUNet_A_miniBlock(nn.Module):\n \"\"\"\n This describes a miniblock in Fig 1b) of the paper. The use of atrous convolutions was found by Diakogiannis et al. 'almost doubles the convergence rate'.\n \n Adapted by Daniel NF Lam from MXNet to Pytorch, with reference to:\n https://github.com/feevos/resuneta/blob/master/nn/BBlocks/resnet_blocks.py\n \n Default values follow the paper shown below.\n \n Paper:\n Diakogiannis, F. I., Waldner, F., Caccetta, P., & Wu, C. (2020). ResUNet-a: A deep learning \n framework for semantic segmentation of remotely sensed data. ISPRS Journal of Photogrammetry \n and Remote Sensing, 162, 94–114. 
https://doi.org/10.1016/j.isprsjprs.2020.01.013\n \n Convolution here uses atrous convolution.\n \"\"\"\n def __init__( self, _in_channels: int, _kernel_size=(3,3) , _dilation_rate=(1,1), _stride=(1,1), _norm_type='BatchNorm', **kwargs):\n super().__init__()\n \n self.in_channels = _in_channels #input & output of res block has to have the same size\n self.out_channels = _in_channels #input & output of res block has to have the same size\n self.kernel_size = _kernel_size\n self.dilation_rate = _dilation_rate\n self.stride = _stride\n if (_norm_type == 'BatchNorm'):\n self.norm = nn.BatchNorm2d\n elif (_norm_type == 'InstanceNorm'):\n self.norm = nn.InstanceNorm2d\n else:\n raise NotImplementedError\n \n \n # PADDING for SAME CONVOLUTIONS (i.e. input in-plane size == output in-plane size)\n p0 = self.dilation_rate[0] * (self.kernel_size[0] - 1)/2 \n p1 = self.dilation_rate[1] * (self.kernel_size[1] - 1)/2 \n p = (int(p0),int(p1))\n \n # DEFINING THE LAYERS TO BE USED IN THIS BLOCK\n # LAYERS are objects, not functions.\n # Made here for ease of use & repeatability, reducing reused code\n self.BN = self.norm(num_features=self.in_channels, affine=True)\n self.conv2d = nn.Conv2d(\n in_channels=self.in_channels,\n out_channels=self.out_channels,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=p,\n dilation=self.dilation_rate,\n bias=False)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x: Tensor) -> Tensor:\n # Forward function defines the network structure\n out = self.BN(x)\n out = self.relu(out)\n out = self.conv2d(out)\n\n out = self.BN(out)\n out = self.relu(out)\n out = self.conv2d(out)\n \n return out\n \nclass Conv2DN(nn.Module):\n \"\"\"\n This class describes the CONV2DN blocks used in the listed paper.\n \n Paper:\n Diakogiannis, F. I., Waldner, F., Caccetta, P., & Wu, C. (2020). ResUNet-a: A deep learning \n framework for semantic segmentation of remotely sensed data. ISPRS Journal of Photogrammetry \n and Remote Sensing, 162, 94–114. 
https://doi.org/10.1016/j.isprsjprs.2020.01.013\n \n \"\"\"\n def __init__( self, _in_channels: int, _out_channels: int, _kernel_size=(1,1), _stride=(1, 1), _padding=(0,0), _dilation_rate=(1,1), _norm_type='BatchNorm', **kwargs ):\n super().__init__()\n self.in_channels = _in_channels\n self.out_channels = _out_channels\n self.kernel_size = _kernel_size\n self.stride = _stride\n self.dilation_rate = _dilation_rate\n self.padding = _padding\n \n if (_norm_type == 'BatchNorm'):\n self.norm = nn.BatchNorm2d\n elif (_norm_type == 'InstanceNorm'):\n self.norm = nn.InstanceNorm2d\n else:\n raise NotImplementedError\n \n self.conv2d = nn.Conv2d(\n in_channels=self.in_channels,\n out_channels=self.out_channels,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n dilation=self.dilation_rate,\n bias=False)\n \n self.BN = self.norm(num_features=self.out_channels, affine=True)\n \n def forward(self,x: Tensor) -> Tensor:\n out = self.conv2d(x)\n out = self.BN(out)\n return out\n \nclass ResUNet_A_Block_4(nn.Module):\n \"\"\"\n A multi-scale residual block that uses atrous convolutions at different dilation rates to examine different scales.\n \n Based on Diakogiannis et al.'s work on ResUNet-A in 2019:\n doi: 10.1016/j.isprsjprs.2020.01.013\n \n Which is itself based on Zhang et al.'s 2017 work:\n doi: 10.1109/LGRS.2018.2802944.\n \"\"\"\n def __init__( self, _in_channels: int, _kernel_size , _dilation_rates , _stride=(1,1) , _norm_type='BatchNorm', **kwargs):\n super().__init__()\n self.in_channels = _in_channels #input & output of res block has to have the same size\n self.kernel_size = _kernel_size\n self.dilation_rate_acrossBlocks = _dilation_rates\n self.stride = _stride\n \n if (_norm_type == 'BatchNorm'):\n self.norm = nn.BatchNorm2d\n elif (_norm_type == 'InstanceNorm'):\n self.norm = nn.InstanceNorm2d\n else:\n raise NotImplementedError\n \n d = self.dilation_rate_acrossBlocks[0]\n self.miniBlock1 = ResUNet_A_miniBlock( self.in_channels, _kernel_size=self.kernel_size , _dilation_rate=(d,d), _stride=self.stride, _norm_type=_norm_type, **kwargs)\n d = self.dilation_rate_acrossBlocks[1]\n self.miniBlock2 = ResUNet_A_miniBlock( self.in_channels, _kernel_size=self.kernel_size , _dilation_rate=(d,d), _stride=self.stride, _norm_type=_norm_type, **kwargs)\n d = self.dilation_rate_acrossBlocks[2]\n self.miniBlock3 = ResUNet_A_miniBlock( self.in_channels, _kernel_size=self.kernel_size , _dilation_rate=(d,d), _stride=self.stride, _norm_type=_norm_type, **kwargs)\n d = self.dilation_rate_acrossBlocks[3]\n self.miniBlock4 = ResUNet_A_miniBlock( self.in_channels, _kernel_size=self.kernel_size , _dilation_rate=(d,d), _stride=self.stride, _norm_type=_norm_type, **kwargs)\n \n def forward(self, x: Tensor) -> Tensor:\n \n # identity map\n out = x # residual\n out = out + self.miniBlock1(x)\n out = out + self.miniBlock2(x)\n out = out + self.miniBlock3(x)\n out = out + self.miniBlock4(x)\n return out\n\nclass ResUNet_A_Block_3(nn.Module):\n \"\"\"\n A multi-scale residual block that uses atrous convolutions at different dilation rates to examine different scales.\n \n Based on Diakogiannis et al.'s work on ResUNet-A in 2019:\n doi: 10.1016/j.isprsjprs.2020.01.013\n \n Which is itself based on Zhang et al.'s 2017 work:\n doi: 10.1109/LGRS.2018.2802944.\n \"\"\"\n def __init__( self, _in_channels: int, _kernel_size , _dilation_rates , _stride=(1,1) , _norm_type='BatchNorm', **kwargs):\n super().__init__()\n self.in_channels = _in_channels #input & output of res block has to have the 
same size\n self.kernel_size = _kernel_size\n self.dilation_rate_acrossBlocks = _dilation_rates\n self.stride = _stride\n \n if (_norm_type == 'BatchNorm'):\n self.norm = nn.BatchNorm2d\n elif (_norm_type == 'InstanceNorm'):\n self.norm = nn.InstanceNorm2d\n else:\n raise NotImplementedError\n \n d = self.dilation_rate_acrossBlocks[0]\n self.miniBlock1 = ResUNet_A_miniBlock( self.in_channels, _kernel_size=self.kernel_size , _dilation_rate=(d,d), _stride=self.stride, _norm_type=_norm_type, **kwargs)\n d = self.dilation_rate_acrossBlocks[1]\n self.miniBlock2 = ResUNet_A_miniBlock( self.in_channels, _kernel_size=self.kernel_size , _dilation_rate=(d,d), _stride=self.stride, _norm_type=_norm_type, **kwargs)\n d = self.dilation_rate_acrossBlocks[2]\n self.miniBlock3 = ResUNet_A_miniBlock( self.in_channels, _kernel_size=self.kernel_size , _dilation_rate=(d,d), _stride=self.stride, _norm_type=_norm_type, **kwargs)\n \n def forward(self, x: Tensor) -> Tensor:\n \n # identity map\n out = x\n out = out + self.miniBlock1(x)\n out = out + self.miniBlock2(x)\n out = out + self.miniBlock3(x)\n return out\nclass ResUNet_A_Block_1(nn.Module):\n \"\"\"\n A multi-scale residual block that uses atrous convolutions at different dilation rates to examine different scales.\n \n Based on Diakogiannis et al.'s work on ResUNet-A in 2019:\n doi: 10.1016/j.isprsjprs.2020.01.013\n \n Which is itself based on Zhang et al.'s 2017 work:\n doi: 10.1109/LGRS.2018.2802944.\n \"\"\"\n def __init__( self, _in_channels: int, _kernel_size , _dilation_rates=(1) , _stride=(1,1) , _norm_type='BatchNorm', **kwargs):\n super().__init__()\n self.in_channels = _in_channels #input & output of res block has to have the same size\n self.kernel_size = _kernel_size\n self.dilation_rate_acrossBlocks = _dilation_rates\n self.stride = _stride\n \n \"\"\"if (_norm_type == 'BatchNorm'):\n self.norm = nn.BatchNorm2d\n elif (_norm_type == 'InstanceNorm'):\n self.norm = nn.InstanceNorm2d\n else:\n raise NotImplementedError\"\"\"\n \n d = self.dilation_rate_acrossBlocks[0]\n self.miniBlock1 = ResUNet_A_miniBlock( self.in_channels, _kernel_size=self.kernel_size , _dilation_rate=(d,d), _stride=self.stride, _norm_type=_norm_type, **kwargs)\n \n def forward(self, x: Tensor) -> Tensor:\n \n # identity map\n out = x\n out = out + self.miniBlock1(x)\n return out\n \n\nclass DownSample(nn.Module):\n \"\"\"\n Convolutional NN to reduce the in-plane dimensions by a factor of 2 each, and increase channels by a factor of 2.\n Default values follow the paper set on Diakogiannis et al.\n From Diakogiannis et al.\n doi: 10.1016/j.isprsjprs.2020.01.013\n \"\"\"\n def __init__(self, _in_channels, _factor=2, _kernel_size=(1,1), _stride=(2, 2), _padding=(0,0), _dilation_rate=(1,1), **kwargs ):\n super().__init__()\n \n self.in_channels = _in_channels;\n self.factor = _factor\n self.out_channels = _in_channels*_factor\n self.kernel_size = _kernel_size\n self.stride = _stride\n self.dilation_rate = _dilation_rate\n self.padding = _padding\n \n self.conv_layer = nn.Conv2d(\n in_channels=self.in_channels,\n out_channels=self.out_channels,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n dilation=self.dilation_rate,\n bias=False)\n def forward(self, x: Tensor) -> Tensor:\n out = self.conv_layer(x)\n return out\n\n \nclass UpSampleAndHalveChannels(nn.Module):\n \"\"\"\n Doubles the spatial dimensions (H,W) but halves the number of channels.\n Inverse of the DownSample function in blocks.py\n \n From Diakogiannis et al.\n doi: 
10.1016/j.isprsjprs.2020.01.013\n \"\"\"\n def __init__(self, _in_channels, _factor=2):\n super().__init__()\n \n self.in_channels = _in_channels\n self.factor = _factor\n \n self.upSample = nn.Upsample(scale_factor=self.factor, mode='bilinear', align_corners=None)\n \n self.halveChannels = nn.Conv2d(in_channels=self.in_channels,\n out_channels=self.in_channels//self.factor,\n kernel_size=(1,1),\n stride=1,\n padding=0,\n dilation=1,\n bias=False)\n def forward(self, x: Tensor) -> Tensor:\n out = self.upSample(x)\n out = self.halveChannels(out)\n return out\n \nclass Combine(nn.Module):\n \"\"\"\n \n \"\"\"\n def __init__(self, _in_channels):\n super().__init__()\n \n self.in_channels_per_tensor = _in_channels\n self.relu = nn.ReLU()\n self.conv2dn = Conv2DN( _in_channels = self.in_channels_per_tensor*2, _out_channels=self.in_channels_per_tensor, _kernel_size=(1,1), _stride=(1, 1), _padding=(0,0), _dilation_rate=(1,1), _norm_type='BatchNorm')\n \n def forward(self, decoder_tensor: Tensor, skip_tensor: Tensor) -> Tensor:\n # Upsample\n out = self.relu(decoder_tensor)\n out = torch.cat((out, skip_tensor), axis=1)\n out = self.conv2dn(out)\n return out\n \n \nclass Encoder_ResUNet_A_d7(nn.Module):\n def __init__(self, _input_channels, _input_array_shape, _norm_type='BatchNorm', _ADL_drop_rate=0.75, _ADL_gamma=0.9):\n super().__init__();\n \n self.initial_channels = _input_channels # Initial number of filters out from conv_first_normed\n self.initial_spatial = (_input_array_shape[2], _input_array_shape[3]) # (H,W)\n \"\"\"\n ResUNet Encoder Section from Diakogiannis et al.\n doi: 10.1016/j.isprsjprs.2020.01.013\n \"\"\"\n \n \n _out_channels_1 = self.initial_channels*2**(0)\n self.conv_first_normed = Conv2DN(_input_array_shape[1], _out_channels_1,\n _kernel_size=(1,1),\n _norm_type = _norm_type)\n \n self.EncResBlk1 = ResUNet_A_Block_4( _in_channels=_out_channels_1, _kernel_size=(3,3), _dilation_rates=[1,3,15,31], _norm_type=_norm_type)\n self.ADL1 = ADL(drop_rate=_ADL_drop_rate, gamma=_ADL_gamma)\n self.DnSmpl = DownSample(_in_channels=_out_channels_1, _kernel_size=(1,1) , stride=(2,2), padding=(0,0),_norm_type=_norm_type)\n spatial = tuple(map(lambda num: num *(0.5**1), self.initial_spatial))\n _out_channels_2 = self.initial_channels*2**(1)\n \n self.EncResBlk2 = ResUNet_A_Block_4(_in_channels=_out_channels_2, _kernel_size=(3,3), _dilation_rates=[1,3,15,31], _norm_type=_norm_type)\n self.ADL2 = ADL(drop_rate=_ADL_drop_rate, gamma=_ADL_gamma)\n self.DnSmp2 = DownSample(_in_channels=_out_channels_2, _kernel_size=(1,1) , stride=(2,2), padding=(0,0),_norm_type=_norm_type)\n spatial = tuple(map(lambda num: num *(0.5**2), self.initial_spatial))\n _out_channels_3 = self.initial_channels*2**(2)\n \n self.EncResBlk3 = ResUNet_A_Block_3( _in_channels=_out_channels_3, _kernel_size=(3,3), _dilation_rates=[1,3,15], _norm_type=_norm_type)\n self.ADL3 = ADL(drop_rate=_ADL_drop_rate, gamma=_ADL_gamma)\n self.DnSmp3 = DownSample(_in_channels=_out_channels_3, _kernel_size=(1,1) , stride=(2,2), padding=(0,0),_norm_type=_norm_type)\n spatial = tuple(map(lambda num: num *(0.5**3), self.initial_spatial))\n _out_channels_4 = self.initial_channels*2**(3)\n \n self.EncResBlk4 = ResUNet_A_Block_3( _in_channels=_out_channels_4, _kernel_size=(3,3), _dilation_rates=[1,3,15], _norm_type=_norm_type)\n self.ADL4 = ADL(drop_rate=_ADL_drop_rate, gamma=_ADL_gamma)\n self.DnSmp4 = DownSample(_in_channels=_out_channels_4, _kernel_size=(1,1) , stride=(2,2), padding=(0,0),_norm_type=_norm_type)\n spatial = tuple(map(lambda num: 
num *(0.5**4), self.initial_spatial))\n _out_channels_5 = self.initial_channels*2**(4)\n \n self.EncResBlk5 = ResUNet_A_Block_1( _in_channels=_out_channels_5, _kernel_size=(3,3), _dilation_rates=[1], _norm_type=_norm_type)\n self.ADL5 = ADL(drop_rate=_ADL_drop_rate, gamma=_ADL_gamma)\n self.DnSmp5 = DownSample(_in_channels=_out_channels_5, _kernel_size=(1,1) , stride=(2,2), padding=(0,0),_norm_type=_norm_type)\n spatial = tuple(map(lambda num: num *(0.5**5), self.initial_spatial))\n _out_channels_6 = self.initial_channels*2**(5)\n \n self.EncResBlk6 = ResUNet_A_Block_1( _in_channels=_out_channels_6, _kernel_size=(3,3), _dilation_rates=[1], _norm_type=_norm_type)\n self.ADL6 = ADL(drop_rate=_ADL_drop_rate, gamma=_ADL_gamma)\n self.DnSmp6 = DownSample(_in_channels=_out_channels_6, _kernel_size=(1,1) , stride=(2,2), padding=(0,0),_norm_type=_norm_type)\n spatial = tuple(map(lambda num: num *(0.5**6), self.initial_spatial))\n _out_channels_7 = self.initial_channels*2**(6)\n \n self.EncResBlk7 = ResUNet_A_Block_1( _in_channels=_out_channels_7, _kernel_size=(3,3), _dilation_rates=[1], _norm_type=_norm_type)\n self.ADL7 = ADL(drop_rate=_ADL_drop_rate, gamma=_ADL_gamma)\n \n self.output_array_size = tuple(map(lambda x: int(x), (0, _out_channels_7, spatial[0], spatial[1])))\n \n def forward(self, x: Tensor) -> Tensor:\n \"\"\"\n Encoder Section\n \"\"\"\n out = self.conv_first_normed(x)\n out = self.EncResBlk1(out)\n out = self.ADL1(out)\n out = self.DnSmpl(out)\n out = self.EncResBlk2(out)\n out = self.ADL2(out)\n out = self.DnSmp2(out)\n out = self.EncResBlk3(out)\n out = self.ADL3(out)\n out = self.DnSmp3(out)\n out = self.EncResBlk4(out)\n out = self.ADL4(out)\n out = self.DnSmp4(out)\n out = self.EncResBlk5(out)\n out = self.ADL5(out)\n out = self.DnSmp5(out)\n out = self.EncResBlk6(out)\n out = self.ADL6(out)\n out = self.DnSmp6(out)\n out = self.EncResBlk7(out)\n out = self.ADL7(out)\n return out\n \n\nclass MultiScale_Classifier(nn.Module):\n \"\"\"\n Consists of 2 parts:\n \n Multi-Scale ResUNet encoder from Diakogiannis et al. until the bridge part.\n doi: 10.1016/j.isprsjprs.2020.01.013\n \n Classifier section from Wang et al. COVID classification from CT.\n doi: 10.1183/13993003.00775-2020\n \"\"\"\n def __init__(self, _input_channels, _input_array_shape, _classifier_out_channels=64, _norm_type='BatchNorm', _ADL_drop_rate=0.75, _ADL_gamma=0.9):\n super().__init__();\n \n self.initial_channels = _input_channels # Initial number of filters output from the encoder's first layer\n self.input_array_shape = _input_array_shape\n self.classifier_out_channels = _classifier_out_channels\n self.norm_type = _norm_type\n self.ADL_drop_rate = _ADL_drop_rate\n self.ADL_gamma = _ADL_gamma\n \n \"\"\"\n ResUNet Encoder Section from Diakogiannis et al.\n doi: 10.1016/j.isprsjprs.2020.01.013\n \"\"\"\n \n self.enc = Encoder_ResUNet_A_d7(_input_channels=self.initial_channels, _input_array_shape=self.input_array_shape,\n _norm_type=self.norm_type, \n _ADL_drop_rate=self.ADL_drop_rate, _ADL_gamma=self.ADL_gamma)\n _out_channels = self.enc.output_array_size[1]\n \"\"\"\n Classifier Section from Wang et al. 2020. 
A fully automatic deep learning system for COVID-19 diagnostic and prognostic analysis.\n DOI: 10.1183/13993003.00775-2020\n \"\"\"\n # Max Pool\n self.MaxPool = nn.MaxPool2d(kernel_size=(2,2), stride=(2,2), padding=0, dilation=1, return_indices=False, ceil_mode=False)\n self.batchnorm = nn.BatchNorm2d(num_features=_out_channels, affine=False)\n self.relu = nn.ReLU()\n self.conv2dense = nn.Conv2d(in_channels=_out_channels, out_channels=_classifier_out_channels, kernel_size=(1,1),stride=(1,1),padding=0, dilation=1, bias=False)\n self.GlobalAvgPool2d = nn.AdaptiveAvgPool2d((1,1))\n self.flatten = nn.Flatten()\n self.dense_classifier = nn.Linear(in_features=_classifier_out_channels, out_features=1, bias=True)\n #self.output_activation = nn.Sigmoid()\n def forward(self, x: Tensor) -> int:\n \"\"\"\n The input tensor x (Torch Tensor) contains both the real/fake image and the conditioning image, concatenated in the channel axis\n \"\"\"\n out = self.enc(x)\n \n \"\"\"\n Classifier Section\n \"\"\" \n out = self.MaxPool(out)\n out = self.batchnorm(out)\n out = self.relu(out)\n out = self.conv2dense(out)\n out = self.GlobalAvgPool2d(out)\n out = self.flatten(out)\n out = self.dense_classifier(out)\n #out = self.output_activation(out)\n return out\n\nclass Generator_ResUNet_A(nn.Module):\n def __init__(self, _input_channels, _input_array_shape, _norm_type='BatchNorm', _ADL_drop_rate=0.75, _ADL_gamma=0.9):\n super().__init__()\n \n # ENCODER\n self.enc = Encoder_ResUNet_A_d7(_input_channels, _input_array_shape, _norm_type=_norm_type, _ADL_drop_rate=_ADL_drop_rate, _ADL_gamma=_ADL_gamma)\n _tensor_array_shape = self.enc.output_array_size\n initial_spatial = (_tensor_array_shape[2], _tensor_array_shape[3])\n \n # BRIDGE\n self.bridge = PSPPooling(_tensor_array_shape)\n \n #DECODER\n self.upsh1 = UpSampleAndHalveChannels(_in_channels=_tensor_array_shape[1], _factor=2)\n out_channels_8 = int(_tensor_array_shape[1]*0.5**(1))\n spatial = tuple(map(lambda num: num *(2**1), initial_spatial))\n self.comb1 = Combine(_in_channels=out_channels_8)\n self.DecResBlk1 = ResUNet_A_Block_1(_in_channels=out_channels_8, _kernel_size=(3,3), _dilation_rates=[1], _norm_type=_norm_type)\n \n self.upsh2 = UpSampleAndHalveChannels(_in_channels=out_channels_8, _factor=2)\n out_channels_9 = int(_tensor_array_shape[1]*0.5**(2))\n spatial = tuple(map(lambda num: num *(2**2), initial_spatial))\n self.comb2 = Combine(_in_channels=out_channels_9)\n self.DecResBlk2 = ResUNet_A_Block_1(_in_channels=out_channels_9, _kernel_size=(3,3), _dilation_rates=[1], _norm_type=_norm_type)\n \n self.upsh3 = UpSampleAndHalveChannels(_in_channels=out_channels_9, _factor=2)\n out_channels_10 = int(_tensor_array_shape[1]*0.5**(3))\n spatial = tuple(map(lambda num: num *(2**3), initial_spatial))\n self.comb3 = Combine(_in_channels=out_channels_10)\n self.DecResBlk3 = ResUNet_A_Block_1(_in_channels=out_channels_10, _kernel_size=(3,3), _dilation_rates=[1, 3, 15], _norm_type=_norm_type)\n \n self.upsh4 = UpSampleAndHalveChannels(_in_channels=out_channels_10, _factor=2)\n out_channels_11 = int(_tensor_array_shape[1]*0.5**(4))\n spatial = tuple(map(lambda num: num *(2**4), initial_spatial))\n self.comb4 = Combine(_in_channels=out_channels_11)\n self.DecResBlk4 = ResUNet_A_Block_1(_in_channels=out_channels_11, _kernel_size=(3,3), _dilation_rates=[1, 3, 15], _norm_type=_norm_type)\n \n self.upsh5 = UpSampleAndHalveChannels(_in_channels=out_channels_11, _factor=2)\n out_channels_12 = int(_tensor_array_shape[1]*0.5**(5))\n spatial = tuple(map(lambda num: num 
*(2**5), initial_spatial))\n self.comb5 = Combine(_in_channels=out_channels_12)\n self.DecResBlk5 = ResUNet_A_Block_1(_in_channels=out_channels_12, _kernel_size=(3,3), _dilation_rates=[1, 3, 15, 31], _norm_type=_norm_type)\n \n self.upsh6 = UpSampleAndHalveChannels(_in_channels=out_channels_12, _factor=2)\n out_channels_13 = int(_tensor_array_shape[1]*0.5**(6))\n spatial = tuple(map(lambda num: num *(2**6), initial_spatial))\n self.comb6 = Combine(_in_channels=out_channels_13)\n self.DecResBlk6 = ResUNet_A_Block_1(_in_channels=out_channels_13, _kernel_size=(3,3), _dilation_rates=[1, 3, 15, 31], _norm_type=_norm_type)\n \n self.comb_last = Combine(_in_channels=out_channels_13)\n \n self.PSPpool_last = PSPPooling((0, out_channels_13, int(spatial[0]), int(spatial[1]) ) )\n \n self.conv2d_final = nn.Conv2d(in_channels=out_channels_13, out_channels=1, kernel_size=(1,1),stride=(1,1),padding=0, dilation=1, bias=False)\n self.output_image = nn.Tanh()\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"\n Encoder Section\n \"\"\"\n out1 = self.enc.conv_first_normed(x)\n out2 = self.enc.EncResBlk1(out1)\n out = self.enc.ADL1(out2)\n out = self.enc.DnSmpl(out)\n out4 = self.enc.EncResBlk2(out)\n out = self.enc.ADL2(out4)\n out = self.enc.DnSmp2(out)\n out6 = self.enc.EncResBlk3(out)\n out = self.enc.ADL3(out6)\n out = self.enc.DnSmp3(out)\n out8 = self.enc.EncResBlk4(out)\n out = self.enc.ADL4(out8)\n out = self.enc.DnSmp4(out)\n out10 = self.enc.EncResBlk5(out)\n out = self.enc.ADL5(out10)\n out = self.enc.DnSmp5(out)\n out12 = self.enc.EncResBlk6(out)\n out = self.enc.ADL6(out12)\n out = self.enc.DnSmp6(out)\n out = self.enc.EncResBlk7(out)\n out = self.enc.ADL7(out)\n \n out = self.bridge(out)\n \n \n out = self.upsh1(out)\n out = self.comb1(out, out12)\n out = self.DecResBlk1(out)\n \n out = self.upsh2(out)\n out = self.comb2(out, out10)\n out = self.DecResBlk2(out)\n \n out = self.upsh3(out)\n out = self.comb3(out, out8)\n out = self.DecResBlk3(out)\n \n out = self.upsh4(out)\n out = self.comb4(out, out6)\n out = self.DecResBlk4(out)\n \n out = self.upsh5(out)\n out = self.comb5(out, out4)\n out = self.DecResBlk5(out)\n \n out = self.upsh6(out)\n out = self.comb6(out, out2)\n out = self.DecResBlk6(out)\n \n out = self.comb_last(out, out1)\n out = self.PSPpool_last(out)\n out = self.conv2d_final(out)\n out = self.output_image(out)\n return out\n\n####################################\n# Deep Residual U-Net\n# Zhang, Z., Liu, Q., & Wang, Y. (2018). Road Extraction by Deep Residual U-Net. IEEE Geoscience and Remote Sensing Letters, 15(5), 749–753. 
https://doi.org/10.1109/LGRS.2018.2802944\n####################################\nclass ResUNet_block(nn.Module):\n def __init__(self, _in_channels, _out_channels, _kernel_size, _stride, _padding, _reluType, _normType=\"BatchNorm\"):\n super().__init__()\n self.in_channels = _in_channels\n self.out_channels = _out_channels\n self.kernel_size=_kernel_size\n self.stride=_stride\n self.padding = _padding\n self.reluType = _reluType\n self.normType = _normType\n \n # BN\n if self.normType==\"BatchNorm\":\n self.norm = nn.BatchNorm2d(num_features=self.in_channels, affine=False)\n if self.normType == \"InstanceNorm\":\n self.norm = nn.InstanceNorm2d(num_features=self.in_channels, affine=False)\n \n # ReLU\n if self.reluType == \"normal\":\n self.relu = nn.ReLU()\n if self.reluType == \"leaky\":\n self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=False)\n # Conv2d\n self.conv2d = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels,\n kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, \n dilation=1, bias=True)\n def forward(self, x: Tensor) -> Tensor:\n out = self.norm(x)\n out = self.relu(out)\n out = self.conv2d(out)\n return out\n \nclass ResUNet_shortcut(nn.Module):\n def __init__(self, _input_tensor_channels, _output_channels, _stride, _normType=\"BatchNorm\"):\n super().__init__()\n self.shortcut_conv = nn.Conv2d(in_channels=_input_tensor_channels, out_channels=_output_channels,\n kernel_size=1, stride=_stride, padding=0, \n dilation=1, bias=False)\n if self.normType==\"BatchNorm\":\n self.shortcut_norm = nn.BatchNorm2d(num_features=_output_channels, affine=False)\n if self.normType == \"InstanceNorm\":\n self.shortcut_norm = nn.InstanceNorm2d(num_features=_output_channels, affine=False)\n \n def forward(self, x: Tensor) -> Tensor:\n out = self.shortcut_conv(x)\n out = self.shortcut_norm(out)\n return out\n\nclass Generator_ResUNet(nn.Module):\n \"\"\"\n Zhang, Z., Liu, Q., & Wang, Y. (2018). Road Extraction by Deep Residual U-Net. IEEE Geoscience and Remote Sensing Letters, 15(5), 749–753. 
https://doi.org/10.1109/LGRS.2018.2802944\n \"\"\"\n def __init__(self, input_array_shape, _first_out_channels=64, _reluType=\"leaky\", _normType=\"BatchNorm\"):\n super().__init__()\n self.first_out_channels = _first_out_channels\n self.input_array_shape = input_array_shape\n self.reluType = _reluType\n self.normType = _normType\n \n # Encoder\n self.conv1 = nn.Conv2d(in_channels=input_array_shape[1], out_channels=self.first_out_channels,\n kernel_size=(3,3), stride=(1,1), padding=(1,1),\n dilation=1, bias=False)\n self.convblock12 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n \n self.convblock21 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**1),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock22 = ResUNet_block(_in_channels=self.first_out_channels*(2**1),\n _out_channels=self.first_out_channels*(2**1),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut2 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**0),\n _output_channels=self.first_out_channels*(2**1), _stride=2, _normType=self.normType)\n \n self.convblock31 = ResUNet_block(_in_channels=self.first_out_channels*(2**1),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock32 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut3 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**1),\n _output_channels=self.first_out_channels*(2**2), _stride=2, _normType=self.normType)\n # Bridge\n self.convblockB1 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**3),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblockB2 = ResUNet_block(_in_channels=self.first_out_channels*(2**3),\n _out_channels=self.first_out_channels*(2**3),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcutB = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**2),\n _output_channels=self.first_out_channels*(2**3), _stride=2, _normType=self.normType)\n \n # Decoder\n self.upSample = nn.Upsample(scale_factor=2, mode='nearest', align_corners=None)\n self.convblock51 = ResUNet_block(_in_channels=self.first_out_channels*(2**3)+self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock52 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut5 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**3)+self.first_out_channels*(2**2),\n _output_channels=self.first_out_channels*(2**2), _stride=1, _normType=self.normType)\n \n self.convblock61 = 
ResUNet_block(_in_channels=self.first_out_channels*(2**2)+self.first_out_channels*(2**1),\n _out_channels=self.first_out_channels*(2**1),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock62 = ResUNet_block(_in_channels=self.first_out_channels*(2**1),\n _out_channels=self.first_out_channels*(2**1),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut6 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**2)+self.first_out_channels*(2**1),\n _output_channels=self.first_out_channels*(2**1), _stride=1, _normType=self.normType)\n self.convblock71 = ResUNet_block(_in_channels=self.first_out_channels*(2**1)+self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock72 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut7 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**1)+self.first_out_channels*(2**0),\n _output_channels=self.first_out_channels*(2**0), _stride=1, _normType=self.normType)\n self.output_conv = nn.Conv2d(in_channels=self.first_out_channels*(2**0), out_channels=input_array_shape[1],\n kernel_size=(1,1), stride=(1,1), padding=(0,0), \n dilation=1, bias=False)\n self.output_activation = nn.Sigmoid()\n \n \n def forward(self, x: Tensor) -> Tensor:\n # Encoder\n out = self.conv1(x)\n out = self.convblock12(out)\n out1 = out + x\n \n out = self.convblock21(out1)\n out = self.convblock22(out)\n shortcut = self.shortcut2(out1)\n out2 = out + shortcut\n \n out = self.convblock31(out2)\n out = self.convblock32(out)\n shortcut = self.shortcut3(out2)\n out3 = out + shortcut\n \n # Bridge\n out = self.convblockB1(out3)\n out = self.convblockB2(out)\n shortcut = self.shortcutB(out3)\n out = out + shortcut\n \n # Decoder\n out = self.upSample(out)\n out5 = torch.cat((out, out3), axis=1)\n out = self.convblock51(out5)\n out = self.convblock52(out)\n shortcut = self.shortcut5(out5)\n out = out + shortcut\n \n out = self.upSample(out)\n out6 = torch.cat((out, out2), axis=1)\n out = self.convblock61(out6)\n out = self.convblock62(out)\n shortcut = self.shortcut6(out6)\n out = out + shortcut\n \n out = self.upSample(out)\n out7 = torch.cat((out, out1), axis=1)\n out = self.convblock71(out7)\n out = self.convblock72(out)\n shortcut = self.shortcut7(out7)\n out = out + shortcut\n \n out = self.output_conv(out)\n out = self.output_activation(out)\n return out\n\n\n###############################\n# Special Blocks\n# i.e. 
Custom Nets\n###############################\n\nclass Generator_ResUNet_modified(nn.Module):\n \"\"\"\n Use the ResUNet as a starting point, then add attention modules, etc.\n Added:\n 1) 2021-04-13: dropout in decoder, like Pix2Pix, after the first 3 conv layers -- use '_dropoutType=\"normal\"' to activate.\n 2) 2021-04-13: attention-dropout layer implemented -- switch _dropoutType to \"ADL\" to activate.\n \"\"\"\n def __init__(self, input_array_shape, _first_out_channels=64, _reluType=\"leaky\", _normType = \"BatchNorm\", _dropoutType=\"ADL\", _drop_rate=0.5, _output_activation=\"Sigmoid\"):\n super().__init__()\n self.first_out_channels = _first_out_channels\n self.input_array_shape = input_array_shape\n self.reluType = _reluType\n self.dropoutType = _dropoutType\n self.outputActivationType = _output_activation\n self.normType = _normType\n # Dropouts\n if self.dropoutType == \"ADL\":\n self.dropout1 = ADL(drop_rate=_drop_rate, gamma=0.9)\n self.dropout2 = ADL(drop_rate=_drop_rate, gamma=0.9)\n self.dropout3 = ADL(drop_rate=_drop_rate, gamma=0.9)\n if self.dropoutType == \"normal\":\n self.dropout1 = nn.Dropout(p=_drop_rate)\n self.dropout2 = nn.Dropout(p=_drop_rate)\n self.dropout3 = nn.Dropout(p=_drop_rate)\n \n # Encoder\n self.conv1 = nn.Conv2d(in_channels=input_array_shape[1], out_channels=self.first_out_channels,\n kernel_size=(3,3), stride=(1,1), padding=(1,1),\n dilation=1, bias=False)\n self.convblock12 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n \n self.convblock21 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**1),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock22 = ResUNet_block(_in_channels=self.first_out_channels*(2**1),\n _out_channels=self.first_out_channels*(2**1),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut2 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**0),\n _output_channels=self.first_out_channels*(2**1), _stride=2, _normType=self.normType)\n \n self.convblock31 = ResUNet_block(_in_channels=self.first_out_channels*(2**1),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock32 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut3 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**1),\n _output_channels=self.first_out_channels*(2**2), _stride=2)\n # Bridge\n self.convblockB1 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**3),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblockB2 = ResUNet_block(_in_channels=self.first_out_channels*(2**3),\n _out_channels=self.first_out_channels*(2**3),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcutB = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**2),\n _output_channels=self.first_out_channels*(2**3), _stride=2, 
_normType=self.normType)\n \n # Decoder\n self.upSample = nn.Upsample(scale_factor=2, mode='nearest', align_corners=None)\n self.convblock51 = ResUNet_block(_in_channels=self.first_out_channels*(2**3)+self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock52 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut5 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**3)+self.first_out_channels*(2**2),\n _output_channels=self.first_out_channels*(2**2), _stride=1, _normType=self.normType)\n \n self.convblock61 = ResUNet_block(_in_channels=self.first_out_channels*(2**2)+self.first_out_channels*(2**1),\n _out_channels=self.first_out_channels*(2**1),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock62 = ResUNet_block(_in_channels=self.first_out_channels*(2**1),\n _out_channels=self.first_out_channels*(2**1),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut6 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**2)+self.first_out_channels*(2**1),\n _output_channels=self.first_out_channels*(2**1), _stride=1, _normType=self.normType)\n self.convblock71 = ResUNet_block(_in_channels=self.first_out_channels*(2**1)+self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.convblock72 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType, _normType=self.normType)\n self.shortcut7 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**1)+self.first_out_channels*(2**0),\n _output_channels=self.first_out_channels*(2**0), _stride=1, _normType=self.normType)\n self.output_conv = nn.Conv2d(in_channels=self.first_out_channels*(2**0), out_channels=input_array_shape[1],\n kernel_size=(1,1), stride=(1,1), padding=(0,0), \n dilation=1, bias=False)\n \n if self.outputActivationType == \"Sigmoid\":\n self.output_activation = nn.Sigmoid()\n if self.outputActivationType == \"Tanh\":\n self.output_activation = nn.Tanh()\n \n def forward(self, x: Tensor) -> Tensor:\n # Encoder\n out = self.conv1(x)\n out = self.convblock12(out)\n out1 = out + x\n \n out = self.convblock21(out1)\n out = self.convblock22(out)\n shortcut = self.shortcut2(out1)\n out2 = out + shortcut\n \n out = self.convblock31(out2)\n out = self.convblock32(out)\n shortcut = self.shortcut3(out2)\n out3 = out + shortcut\n \n # Bridge\n out = self.convblockB1(out3)\n out = self.convblockB2(out)\n shortcut = self.shortcutB(out3)\n out = out + shortcut\n \n # Decoder\n out = self.upSample(out)\n out5 = torch.cat((out, out3), axis=1)\n out = self.convblock51(out5)\n out = self.dropout1(out)\n out = self.convblock52(out)\n out = self.dropout2(out)\n shortcut = self.shortcut5(out5)\n out = out + shortcut\n \n out = self.upSample(out)\n out6 = torch.cat((out, out2), axis=1)\n out = self.convblock61(out6)\n out = self.dropout3(out)\n out = self.convblock62(out)\n shortcut = self.shortcut6(out6)\n out 
= out + shortcut\n \n out = self.upSample(out)\n out7 = torch.cat((out, out1), axis=1)\n out = self.convblock71(out7)\n out = self.convblock72(out)\n shortcut = self.shortcut7(out7)\n out = out + shortcut\n \n out = self.output_conv(out)\n out = self.output_activation(out)\n return out\n\nclass Generator_ResUNet_PixelShuffle(nn.Module):\n \"\"\"\n A ResUNet that uses PixelShuffle to upsample. This was found to provide poor results in pre-training.\n \"\"\"\n def __init__(self, input_array_shape, _first_out_channels=64, _reluType=\"leaky\", _dropoutType=\"ADL\", _drop_rate=0.5, _output_activation=\"Sigmoid\"):\n super().__init__()\n self.first_out_channels = _first_out_channels\n self.input_array_shape = input_array_shape\n self.reluType = _reluType\n self.dropoutType = _dropoutType\n self.outputActivationType = _output_activation\n \n # Dropouts\n if self.dropoutType == \"ADL\":\n self.dropout1 = ADL(drop_rate=_drop_rate, gamma=0.9)\n self.dropout2 = ADL(drop_rate=_drop_rate, gamma=0.9)\n self.dropout3 = ADL(drop_rate=_drop_rate, gamma=0.9)\n if self.dropoutType == \"normal\":\n self.dropout1 = nn.Dropout(p=_drop_rate)\n self.dropout2 = nn.Dropout(p=_drop_rate)\n self.dropout3 = nn.Dropout(p=_drop_rate)\n \n # Encoder\n self.conv1 = nn.Conv2d(in_channels=input_array_shape[1], out_channels=self.first_out_channels,\n kernel_size=(3,3), stride=(1,1), padding=(1,1),\n dilation=1, bias=False)\n self.convblock12 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n \n self.convblock21 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType)\n self.convblock22 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n self.shortcut2 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**0),\n _output_channels=self.first_out_channels*(2**2), _stride=2)\n \n self.convblock31 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**4),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType)\n self.convblock32 = ResUNet_block(_in_channels=self.first_out_channels*(2**4),\n _out_channels=self.first_out_channels*(2**4),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n self.shortcut3 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**2),\n _output_channels=self.first_out_channels*(2**4), _stride=2)\n # Bridge\n self.convblockB1 = ResUNet_block(_in_channels=self.first_out_channels*(2**4),\n _out_channels=self.first_out_channels*(2**6),\n _kernel_size=(3,3), _stride=(2,2), _padding=(1,1),\n _reluType=self.reluType)\n self.convblockB2 = ResUNet_block(_in_channels=self.first_out_channels*(2**6),\n _out_channels=self.first_out_channels*(2**6),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n # Decoder\n self.upSample = nn.PixelShuffle(upscale_factor=2)\n \n self.convblock51 = ResUNet_block(_in_channels=self.first_out_channels*(2**4)+self.first_out_channels*(2**4),\n _out_channels=self.first_out_channels*(2**4),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n self.convblock52 = 
ResUNet_block(_in_channels=self.first_out_channels*(2**4),\n _out_channels=self.first_out_channels*(2**4),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n self.shortcut5 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**4)+self.first_out_channels*(2**4),\n _output_channels=self.first_out_channels*(2**4), _stride=1)\n \n self.convblock61 = ResUNet_block(_in_channels=self.first_out_channels*(2**2)+self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n self.convblock62 = ResUNet_block(_in_channels=self.first_out_channels*(2**2),\n _out_channels=self.first_out_channels*(2**2),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n self.shortcut6 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**2)+self.first_out_channels*(2**2),\n _output_channels=self.first_out_channels*(2**2), _stride=1)\n \n self.convblock71 = ResUNet_block(_in_channels=self.first_out_channels*(2**0)+self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n self.convblock72 = ResUNet_block(_in_channels=self.first_out_channels*(2**0),\n _out_channels=self.first_out_channels*(2**0),\n _kernel_size=(3,3), _stride=(1,1), _padding=(1,1),\n _reluType=self.reluType)\n self.shortcut7 = ResUNet_shortcut(_input_tensor_channels=self.first_out_channels*(2**0)+self.first_out_channels*(2**0),\n _output_channels=self.first_out_channels*(2**0), _stride=1)\n self.output_conv = nn.Conv2d(in_channels=self.first_out_channels*(2**0), out_channels=input_array_shape[1],\n kernel_size=(1,1), stride=(1,1), padding=(0,0), \n dilation=1, bias=False)\n \n if self.outputActivationType == \"Sigmoid\":\n self.output_activation = nn.Sigmoid()\n if self.outputActivationType == \"Tanh\":\n self.output_activation = nn.Tanh()\n\n def forward(self, x: Tensor) -> Tensor:\n # Encoder\n out = self.conv1(x)\n out = self.convblock12(out)\n out1 = out + x\n \n out = self.convblock21(out1)\n out = self.convblock22(out)\n shortcut = self.shortcut2(out1)\n out2 = out + shortcut\n \n out = self.convblock31(out2)\n out = self.convblock32(out)\n shortcut = self.shortcut3(out2)\n out3 = out + shortcut\n \n # Bridge\n out = self.convblockB1(out3)\n out = self.convblockB2(out)\n \n \n # Decoder\n out = self.upSample(out)\n out5 = torch.cat((out, out3), axis=1)\n out = self.convblock51(out5)\n out = self.dropout1(out)\n out = self.convblock52(out)\n out = self.dropout2(out)\n shortcut = self.shortcut5(out5)\n out = out + shortcut\n \n out = self.upSample(out)\n out6 = torch.cat((out, out2), axis=1)\n out = self.convblock61(out6)\n out = self.dropout3(out)\n out = self.convblock62(out)\n shortcut = self.shortcut6(out6)\n out = out + shortcut\n \n out = self.upSample(out)\n out7 = torch.cat((out, out1), axis=1)\n out = self.convblock71(out7)\n out = self.convblock72(out)\n shortcut = self.shortcut7(out7)\n out = out + shortcut\n \n out = self.output_conv(out)\n out = self.output_activation(out)\n return out\n \n############################################################\n# MEDGAN\n# U-net blocks as generator\n# 1) Perceptual loss instead of pixel-distance loss\n# 2) use VGG19 network for feature extraction\n############################################################\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "torch.exp", "torch.sigmoid", "torch.mul", "torch.nn.MaxPool2d", "torch.unsqueeze", "torch.abs", "torch.nn.init.normal_", "torch.Tensor", "torch.nn.Flatten", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.PixelShuffle", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.rand", "torch.nn.Dropout", "torch.nn.Sigmoid", "torch.nn.Upsample", "torch.nn.AdaptiveAvgPool2d", "torch.mean" ] ]
Guan-t7/DeepAR
[ "d7d76a7e90df7a69ad23b736caac9ab79f926717" ]
[ "evaluate.py" ]
[ "import argparse\nimport logging\nimport os\n\nimport numpy as np\nimport torch\nfrom torch.utils.data.sampler import RandomSampler\nfrom tqdm import tqdm\n\nimport utils\nimport model.net as net\nfrom dataloader import *\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nlogger = logging.getLogger('DeepAR.Eval')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='elect', help='Name of the dataset')\nparser.add_argument('--data-folder', default='data', help='Parent dir of the dataset')\nparser.add_argument('--model-name', default='base_model', help='Directory containing params.json')\nparser.add_argument('--relative-metrics', action='store_true', help='Whether to normalize the metrics by label scales')\nparser.add_argument('--sampling', action='store_true', help='Whether to sample during evaluation')\nparser.add_argument('--restore-file', default='best',\n help='Optional, name of the file in --model_dir containing weights to reload before \\\n training') # 'best' or 'epoch_#'\n\n\ndef evaluate(model, loss_fn, test_loader, params, plot_num, sample=True):\n '''Evaluate the model on the test set.\n Args:\n model: (torch.nn.Module) the Deep AR model\n loss_fn: a function that takes outputs and labels per timestep, and then computes the loss for the batch\n test_loader: load test data and labels\n params: (Params) hyperparameters\n plot_num: (-1): evaluation from evaluate.py; else (epoch): evaluation on epoch\n sample: (boolean) do ancestral sampling or directly use output mu from last time step\n '''\n model.eval()\n with torch.no_grad():\n plot_batch = np.random.randint(len(test_loader)-1)\n\n summary_metric = {}\n raw_metrics = utils.init_metrics(sample=sample)\n\n # Test_loader: \n # test_batch ([batch_size, train_window, 1+cov_dim]): z_{0:T-1} + x_{1:T}, note that z_0 = 0;\n # id_batch ([batch_size]): one integer denoting the time series id;\n # v ([batch_size, 2]): scaling factor for each window;\n # labels ([batch_size, train_window]): z_{1:T}.\n for i, (test_batch, id_batch, v, labels) in enumerate(tqdm(test_loader)):\n test_batch = test_batch.permute(1, 0, 2).to(torch.float32).to(params.device)\n id_batch = id_batch.unsqueeze(0).to(params.device)\n v_batch = v.to(torch.float32).to(params.device)\n labels = labels.to(torch.float32).to(params.device)\n batch_size = test_batch.shape[1]\n input_mu = torch.zeros(batch_size, params.test_predict_start, device=params.device) # scaled\n input_sigma = torch.zeros(batch_size, params.test_predict_start, device=params.device) # scaled\n hidden = model.init_hidden(batch_size)\n cell = model.init_cell(batch_size)\n\n for t in range(params.test_predict_start):\n # if z_t is missing, replace it by output mu from the last time step\n zero_index = (test_batch[t,:,0] == 0)\n if t > 0 and torch.sum(zero_index) > 0:\n test_batch[t,zero_index,0] = mu[zero_index]\n\n mu, sigma, hidden, cell = model(test_batch[t].unsqueeze(0), id_batch, hidden, cell)\n input_mu[:,t] = v_batch[:, 0] * mu + v_batch[:, 1]\n input_sigma[:,t] = v_batch[:, 0] * sigma\n\n if sample:\n samples, sample_mu, sample_sigma = model.test(test_batch, v_batch, id_batch, hidden, cell, sampling=True)\n raw_metrics = utils.update_metrics(raw_metrics, input_mu, input_sigma, sample_mu, labels, params.test_predict_start, samples, relative = params.relative_metrics)\n else:\n sample_mu, sample_sigma = model.test(test_batch, v_batch, id_batch, hidden, cell)\n raw_metrics = utils.update_metrics(raw_metrics, input_mu, input_sigma, sample_mu, 
labels, params.test_predict_start, relative = params.relative_metrics)\n\n if i == plot_batch:\n if sample:\n sample_metrics = utils.get_metrics(sample_mu, labels, params.test_predict_start, samples, relative = params.relative_metrics)\n else:\n sample_metrics = utils.get_metrics(sample_mu, labels, params.test_predict_start, relative = params.relative_metrics) \n # select 10 from samples with highest error and 10 from the rest\n top_10_nd_sample = (-sample_metrics['ND']).argsort()[:batch_size // 10] # hard coded to be 10\n chosen = set(top_10_nd_sample.tolist())\n all_samples = set(range(batch_size))\n not_chosen = np.asarray(list(all_samples - chosen))\n if batch_size < 100: # make sure there are enough unique samples to choose top 10 from\n random_sample_10 = np.random.choice(top_10_nd_sample, size=10, replace=True)\n else:\n random_sample_10 = np.random.choice(top_10_nd_sample, size=10, replace=False)\n if batch_size < 12: # make sure there are enough unique samples to choose bottom 90 from\n random_sample_90 = np.random.choice(not_chosen, size=10, replace=True)\n else:\n random_sample_90 = np.random.choice(not_chosen, size=10, replace=False)\n combined_sample = np.concatenate((random_sample_10, random_sample_90))\n\n label_plot = labels[combined_sample].data.cpu().numpy()\n predict_mu = sample_mu[combined_sample].data.cpu().numpy()\n predict_sigma = sample_sigma[combined_sample].data.cpu().numpy()\n plot_mu = np.concatenate((input_mu[combined_sample].data.cpu().numpy(), predict_mu), axis=1)\n plot_sigma = np.concatenate((input_sigma[combined_sample].data.cpu().numpy(), predict_sigma), axis=1)\n plot_metrics = {_k: _v[combined_sample] for _k, _v in sample_metrics.items()}\n plot_eight_windows(params.plot_dir, plot_mu, plot_sigma, label_plot, params.test_window, params.test_predict_start, plot_num, plot_metrics, sample)\n\n summary_metric = utils.final_metrics(raw_metrics, sampling=sample)\n metrics_string = '; '.join('{}: {:05.3f}'.format(k, v) for k, v in summary_metric.items())\n logger.info('- Full test metrics: ' + metrics_string)\n return summary_metric\n\n\ndef plot_eight_windows(plot_dir,\n predict_values,\n predict_sigma,\n labels,\n window_size,\n predict_start,\n plot_num,\n plot_metrics,\n sampling=False):\n\n x = np.arange(window_size)\n f = plt.figure(figsize=(8, 42), constrained_layout=True)\n nrows = 21\n ncols = 1\n ax = f.subplots(nrows, ncols)\n\n for k in range(nrows):\n if k == 10:\n ax[k].plot(x, x, color='g')\n ax[k].plot(x, x[::-1], color='g')\n ax[k].set_title('This separates top 10 and bottom 90', fontsize=10)\n continue\n m = k if k < 10 else k - 1\n ax[k].plot(x, predict_values[m], color='b')\n ax[k].fill_between(x[predict_start:], predict_values[m, predict_start:] - 2 * predict_sigma[m, predict_start:],\n predict_values[m, predict_start:] + 2 * predict_sigma[m, predict_start:], color='blue',\n alpha=0.2)\n ax[k].plot(x, labels[m, :], color='r')\n ax[k].axvline(predict_start, color='g', linestyle='dashed')\n\n #metrics = utils.final_metrics_({_k: [_i[k] for _i in _v] for _k, _v in plot_metrics.items()})\n\n\n plot_metrics_str = f'ND: {plot_metrics[\"ND\"][m]: .3f} ' \\\n f'RMSE: {plot_metrics[\"RMSE\"][m]: .3f}'\n if sampling:\n plot_metrics_str += f' rou90: {plot_metrics[\"rou90\"][m]: .3f} ' \\\n f'rou50: {plot_metrics[\"rou50\"][m]: .3f}'\n\n ax[k].set_title(plot_metrics_str, fontsize=10)\n\n f.savefig(os.path.join(plot_dir, str(plot_num) + '.png'))\n plt.close()\n\nif __name__ == '__main__':\n # Load the parameters\n args = parser.parse_args()\n model_dir 
= os.path.join('experiments', args.model_name) \n json_path = os.path.join(model_dir, 'params.json')\n data_dir = os.path.join(args.data_folder, args.dataset)\n assert os.path.isfile(json_path), 'No json configuration file found at {}'.format(json_path)\n params = utils.Params(json_path)\n\n utils.set_logger(os.path.join(model_dir, 'eval.log'))\n\n params.relative_metrics = args.relative_metrics\n params.sampling = args.sampling\n params.model_dir = model_dir\n params.plot_dir = os.path.join(model_dir, 'figures')\n \n cuda_exist = torch.cuda.is_available() # use GPU is available\n\n # Set random seeds for reproducible experiments if necessary\n if cuda_exist:\n params.device = torch.device('cuda')\n # torch.cuda.manual_seed(240)\n logger.info('Using Cuda...')\n model = net.Net(params).cuda()\n else:\n params.device = torch.device('cpu')\n # torch.manual_seed(230)\n logger.info('Not using cuda...')\n model = net.Net(params)\n\n # Create the input data pipeline\n logger.info('Loading the datasets...')\n\n test_set = TestDataset(data_dir, args.dataset, params.num_class)\n test_loader = DataLoader(test_set, batch_size=params.predict_batch, sampler=RandomSampler(test_set), num_workers=0)\n logger.info('- done.')\n\n print('model: ', model)\n loss_fn = net.loss_fn\n\n logger.info('Starting evaluation')\n\n # Reload weights from the saved file\n utils.load_checkpoint(os.path.join(model_dir, args.restore_file + '.pth.tar'), model)\n\n test_metrics = evaluate(model, loss_fn, test_loader, params, -1, params.sampling)\n save_path = os.path.join(model_dir, 'metrics_test_{}.json'.format(args.restore_file))\n utils.save_dict_to_json(test_metrics, save_path)\n" ]
[ [ "matplotlib.use", "torch.device", "torch.zeros", "numpy.concatenate", "torch.utils.data.sampler.RandomSampler", "numpy.random.choice", "torch.no_grad", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "torch.cuda.is_available", "numpy.arange", "torch.sum" ] ]
dovallev/pooling-network
[ "fecbd6c8d10cb458d44c40b741c2f5d3f73cfc8e" ]
[ "tests/test_haverly.py" ]
[ "import numpy as np\nimport pyomo.environ as pe\nfrom galini.relaxations.relax import relax, RelaxationData\n\nfrom pooling_network.formulation.pq_block import PoolingPQFormulation\nfrom pooling_network.instances.data import pooling_problem_from_data\nfrom pooling_network.instances.literature import literature_problem_data\nfrom pooling_network.pooling import (\n problem_pool_output_qualities, compute_gamma_ijk, compute_beta_kl_bounds, compute_gamma_kl_bounds, index_set_ilj\n)\n\n\ndef test_haverly1():\n # Solvers setup\n mip_solver = pe.SolverFactory('cplex_direct')\n global_solver = pe.SolverFactory('gurobi')\n global_solver.options['NonConvex'] = 2\n\n # Build PQ-formulation\n problem = pooling_problem_from_data(literature_problem_data('haverly1'))\n model = pe.ConcreteModel()\n\n model.pooling = PoolingPQFormulation()\n model.pooling.set_pooling_problem(problem)\n model.pooling.rebuild()\n\n objective = model.pooling.add_objective(use_flow_cost=False)\n\n # Solve globally with gams to check the model is correct\n global_solver.solve(model)\n assert pe.value(objective) == -400.0\n\n # Build linear relaxation and solve it\n relaxation_data = RelaxationData(model)\n relaxed_model = relax(model, relaxation_data)\n relaxed_objective = relaxed_model.find_component(objective.getname(fully_qualified=True))\n mip_solver.solve(relaxed_model)\n\n # Compute error between quadratic formulation and its relaxation\n expected_err = {\n ('c1', 'o1', 'p1'): 25.0,\n ('c1', 'o1', 'p2'): 50.0,\n ('c2', 'o1', 'p1'): 25.0,\n ('c2', 'o1', 'p2'): 50.0,\n }\n for i, l, j in index_set_ilj(problem):\n err_expr = abs(\n relaxed_model.pooling.v[i, l, j] - relaxed_model.pooling.q[i, l] * relaxed_model.pooling.y[l, j]\n )\n err = pe.value(err_expr)\n np.testing.assert_almost_equal(err, expected_err[i, l, j])\n\n # Check qualities\n expected_gamma = {\n ('c1', 'p1', 'q1'): 0.5,\n ('c2', 'p1', 'q1'): -1.5,\n ('c3', 'p1', 'q1'): -0.5,\n ('c1', 'p2', 'q1'): 1.5,\n ('c2', 'p2', 'q1'): -0.5,\n ('c3', 'p2', 'q1'): 0.5,\n }\n for input in problem.nodes_at_layer(0):\n for output in problem.successors(input.name, layer=2):\n for k, q in output.attr['quality_upper'].items():\n gamma = compute_gamma_ijk(input, output, k)\n np.testing.assert_almost_equal(gamma, expected_gamma[input.name, output.name, k])\n\n expected_gamma_lower = {\n ('o1', 'p1', 'q1'): -1.5,\n ('o1', 'p2', 'q1'): -0.5,\n }\n expected_gamma_upper = {\n ('o1', 'p1', 'q1'): 0.5,\n ('o1', 'p2', 'q1'): 1.5,\n }\n expected_beta_lower = {\n ('o1', 'p1', 'q1'): -0.5,\n ('o1', 'p2', 'q1'): 0.5,\n }\n expected_beta_upper = {\n ('o1', 'p1', 'q1'): -0.5,\n ('o1', 'p2', 'q1'): 0.5,\n }\n for l, j, k in problem_pool_output_qualities(problem):\n gamma_lower, gamma_upper = compute_gamma_kl_bounds(l, j, k, problem)\n beta_lower, beta_upper = compute_beta_kl_bounds(l, j, k, problem)\n\n np.testing.assert_almost_equal(gamma_lower, expected_gamma_lower[l, j, k])\n np.testing.assert_almost_equal(gamma_upper, expected_gamma_upper[l, j, k])\n np.testing.assert_almost_equal(beta_lower, expected_beta_lower[l, j, k])\n np.testing.assert_almost_equal(beta_upper, expected_beta_upper[l, j, k])\n\n # Now add variables inequalities only\n relaxed_model.pooling.add_inequalities(add_inequalities=False, add_uxt=True)\n mip_solver.solve(relaxed_model)\n np.testing.assert_almost_equal(-500.0, pe.value(relaxed_objective))\n\n ineq_block = relaxed_model.pooling.inequalities\n # Check the value of variables after solve\n expected_z = {\n ('o1', 'p1', 'q1'): 0.5,\n ('o1', 'p2', 'q1'): 0.5,\n }\n 
expected_t = {\n ('o1', 'p1', 'q1'): -0.5,\n ('o1', 'p2', 'q1'): 0.5,\n }\n expected_u = {\n ('o1', 'p1', 'q1'): 0.25,\n ('o1', 'p2', 'q1'): -0.25,\n }\n expected_y = {\n ('o1', 'p1', 'q1'): -0.25,\n ('o1', 'p2', 'q1'): 0.25,\n }\n expected_s = {\n ('o1', 'p1'): 0.5,\n ('o1', 'p2'): 0.5,\n }\n for l, j, k in problem_pool_output_qualities(problem):\n np.testing.assert_almost_equal(ineq_block.z[l, j].value, expected_z[l, j, k])\n np.testing.assert_almost_equal(ineq_block.t[j, k, l].value, expected_t[l, j, k])\n np.testing.assert_almost_equal(ineq_block.u[j, k, l].value, expected_u[l, j, k])\n np.testing.assert_almost_equal(ineq_block.y[j, k, l].value, expected_y[l, j, k])\n np.testing.assert_almost_equal(ineq_block.s[l, j].value, expected_s[l, j])\n\n # Now add everything.\n relaxed_model.pooling.add_inequalities(add_inequalities=True, add_uxt=True)\n assert 2 == len(relaxed_model.pooling.inequalities._inequalities)\n mip_solver.solve(relaxed_model)\n np.testing.assert_almost_equal(pe.value(relaxed_objective), -500.0)\n\n # Now add the cuts, in the first iteration it will add Equation 15 and 18.\n relaxed_model.pooling.add_cuts()\n assert 2 == len(relaxed_model.pooling.inequalities._cuts)\n mip_solver.solve(relaxed_model)\n relaxed_model.pooling.add_cuts()\n # No cuts added\n assert 2 == len(relaxed_model.pooling.inequalities._cuts)\n np.testing.assert_almost_equal(pe.value(relaxed_objective), -400.0)" ]
[ [ "numpy.testing.assert_almost_equal" ] ]
cyoon1729/distributedRL
[ "1338bc2655e9af31fd21d40996153515aa8d75f9" ]
[ "common/abstract/worker.py" ]
[ "import asyncio\nimport random\nimport time\nfrom abc import ABC, abstractmethod\nfrom collections import deque\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom typing import Deque\n\nimport numpy as np\nimport pyarrow as pa\nimport torch\nimport torch.nn as nn\nimport zmq\n\nfrom common.utils.utils import create_env\n\n\nclass Worker(ABC):\n def __init__(\n self, worker_id: int, worker_brain: nn.Module, worker_cfg: dict, comm_cfg: dict\n ):\n self.worker_id = worker_id\n self.cfg = worker_cfg\n self.device = worker_cfg[\"worker_device\"]\n self.brain = deepcopy(worker_brain)\n self.brain.to(self.device)\n\n # create env\n random.seed(self.worker_id)\n self.env = create_env(\n self.cfg[\"env_name\"], self.cfg[\"atari\"], self.cfg[\"max_episode_steps\"]\n )\n self.seed = random.randint(1, 999)\n self.env.seed(self.seed)\n\n # unpack communication configs\n self.pubsub_port = comm_cfg[\"pubsub_port\"]\n self.pullpush_port = comm_cfg[\"pullpush_port\"]\n\n # initialize zmq sockets\n print(f\"[Worker {self.worker_id}]: initializing sockets..\")\n self.initialize_sockets()\n\n @abstractmethod\n def write_log(self):\n \"\"\"Log performance (e.g. using Tensorboard)\"\"\"\n pass\n\n @abstractmethod\n def select_action(self, state: np.ndarray) -> np.ndarray:\n \"\"\"Select action with worker's brain\"\"\"\n pass\n\n @abstractmethod\n def preprocess_data(self, data) -> list:\n \"\"\"Preprocess collected data if necessary (e.g. n-step)\"\"\"\n pass\n\n @abstractmethod\n def collect_data(self) -> list:\n \"\"\"Run environment and collect data until stopping criterion satisfied\"\"\"\n pass\n\n @abstractmethod\n def test_run(self):\n \"\"\"Specifically for the performance-testing worker\"\"\"\n pass\n\n def synchronize(self, new_params: list):\n \"\"\"Synchronize worker brain with parameter server\"\"\"\n for param, new_param in zip(self.brain.parameters(), new_params):\n new_param = torch.FloatTensor(new_param).to(self.device)\n param.data.copy_(new_param)\n\n def initialize_sockets(self):\n # for receiving params from learner\n context = zmq.Context()\n self.sub_socket = context.socket(zmq.SUB)\n self.sub_socket.setsockopt_string(zmq.SUBSCRIBE, \"\")\n self.sub_socket.setsockopt(zmq.CONFLATE, 1)\n self.sub_socket.connect(f\"tcp://127.0.0.1:{self.pubsub_port}\")\n\n # for sending replay data to buffer\n time.sleep(1)\n context = zmq.Context()\n self.push_socket = context.socket(zmq.PUSH)\n self.push_socket.connect(f\"tcp://127.0.0.1:{self.pullpush_port}\")\n\n def send_replay_data(self, replay_data):\n replay_data_id = pa.serialize(replay_data).to_buffer()\n self.push_socket.send(replay_data_id)\n\n def receive_new_params(self):\n new_params_id = False\n try:\n new_params_id = self.sub_socket.recv(zmq.DONTWAIT)\n except zmq.Again:\n return False\n\n if new_params_id:\n new_params = pa.deserialize(new_params_id)\n self.synchronize(new_params)\n return True\n\n def run(self):\n while True:\n local_buffer = self.collect_data()\n self.send_replay_data(local_buffer)\n self.receive_new_params()\n\n\nclass ApeXWorker(Worker):\n \"\"\"Abstract class for ApeX distrbuted workers \"\"\"\n\n def __init__(\n self, worker_id: int, worker_brain: nn.Module, cfg: dict, comm_cfg: dict\n ):\n super().__init__(worker_id, worker_brain, cfg, comm_cfg)\n self.nstep_queue = deque(maxlen=self.cfg[\"num_step\"])\n self.worker_buffer_size = self.cfg[\"worker_buffer_size\"]\n self.gamma = self.cfg[\"gamma\"]\n self.num_step = self.cfg[\"num_step\"]\n\n def preprocess_data(self, nstepqueue: Deque) -> tuple:\n 
discounted_reward = 0\n _, _, _, last_state, done = nstepqueue[-1]\n for transition in list(reversed(nstepqueue)):\n state, action, reward, _, _ = transition\n discounted_reward = reward + self.gamma * discounted_reward\n nstep_data = (state, action, discounted_reward, last_state, done)\n\n q_value = self.brain.forward(\n torch.FloatTensor(state).unsqueeze(0).to(self.device)\n )[0][action]\n\n bootstrap_q = torch.max(\n self.brain.forward(\n torch.FloatTensor(last_state).unsqueeze(0).to(self.device)\n ),\n 1,\n )\n\n target_q_value = (\n discounted_reward + self.gamma ** self.num_step * bootstrap_q[0]\n )\n\n priority_value = torch.abs(target_q_value - q_value).detach().view(-1)\n priority_value = torch.clamp(priority_value, min=1e-8)\n priority_value = priority_value.cpu().numpy().tolist()\n\n return nstep_data, priority_value\n\n def collect_data(self, verbose=True):\n \"\"\"Fill worker buffer until some stopping criterion is satisfied\"\"\"\n local_buffer = []\n nstep_queue = deque(maxlen=self.num_step)\n\n while len(local_buffer) < self.worker_buffer_size:\n episode_reward = 0\n done = False\n state = self.env.reset()\n while not done:\n self.env.render()\n action = self.select_action(state)\n transition = self.environment_step(state, action)\n next_state = transition[-2]\n done = transition[-1]\n reward = transition[-3]\n episode_reward = episode_reward + reward\n\n nstep_queue.append(transition)\n if (len(nstep_queue) == self.num_step) or done:\n nstep_data, priorities = self.preprocess_data(nstep_queue)\n local_buffer.append([nstep_data, priorities])\n\n state = next_state\n\n if verbose:\n print(f\"Worker {self.worker_id}: {episode_reward}\")\n\n return local_buffer\n" ]
[ [ "torch.abs", "torch.FloatTensor", "torch.clamp" ] ]
Lornatang/PyTorch-Tutorials
[ "eaca673c46b012fb0f4741d96a5f797715a0c7d5" ]
[ "v2/beginner_source/examples_nn/two_layer_net_optim.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nPyTorch: optim\n--------------\n\nA fully-connected ReLU network with one hidden layer, trained to predict y from x\nby minimizing squared Euclidean distance.\n\nThis implementation uses the nn package from PyTorch to build the network.\n\nRather than manually updating the weights of the model as we have been doing,\nwe use the optim package to define an Optimizer that will update the weights\nfor us. The optim package defines many optimization algorithms that are commonly\nused for deep learning, including SGD+momentum, RMSProp, Adam, etc.\n\"\"\"\nimport torch\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold inputs and outputs\nx = torch.randn(N, D_in)\ny = torch.randn(N, D_out)\n\n# Use the nn package to define our model and loss function.\nmodel = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n)\nloss_fn = torch.nn.MSELoss(reduction='sum')\n\n# Use the optim package to define an Optimizer that will update the weights of\n# the model for us. Here we will use Adam; the optim package contains many other\n# optimization algoriths. The first argument to the Adam constructor tells the\n# optimizer which Tensors it should update.\nlearning_rate = 1e-4\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\nfor t in range(500):\n # Forward pass: compute predicted y by passing x to the model.\n y_pred = model(x)\n \n # Compute and print loss.\n loss = loss_fn(y_pred, y)\n print(t, loss.item())\n \n # Before the backward pass, use the optimizer object to zero all of the\n # gradients for the variables it will update (which are the learnable\n # weights of the model). This is because by default, gradients are\n # accumulated in buffers( i.e, not overwritten) whenever .backward()\n # is called. Checkout docs of torch.autograd.backward for more details.\n optimizer.zero_grad()\n \n # Backward pass: compute gradient of the loss with respect to model\n # parameters\n loss.backward()\n \n # Calling the step function on an Optimizer makes an update to its\n # parameters\n optimizer.step()\n" ]
[ [ "torch.nn.MSELoss", "torch.nn.Linear", "torch.randn", "torch.nn.ReLU" ] ]
mamdamin/MVCNN-TensorFlow
[ "6d8dd3f8d68feccd90ce1b82de31c4b8524b59ab" ]
[ "input.py" ]
[ "import cv2\n\nfrom PIL import Image\nimport random\nimport numpy as np\nimport time\nimport queue as Queue\nimport threading\nimport globals as g_\nfrom concurrent.futures import ThreadPoolExecutor\nfrom augment import augmentImages\nimport tensorflow as tf\n\nW = H = 256\n\nclass Shape:\n def __init__(self, list_file):\n with open(list_file) as f:\n self.label = int(f.readline())\n self.V = int(f.readline())\n view_files = [l.strip() for l in f.readlines()]\n\n self.views = self._load_views(view_files, self.V)\n self.done_mean = False\n\n\n def _load_views(self, view_files, V):\n views = []\n for f in view_files:\n im = cv2.imread(f) #im = Image.open(f)#cv2.imread(f)\n #im = cv2.resize(im, (W, H))\n #im = im*500.0\n #im = np.random.rand(W,H,3)*1000-500\n #print('Shape: {}, Min: {}, Max: {}, Mean: {}'.format(im.shape,np.amin(im),np.amax(im),np.mean(im)))\n #halt\n #im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) #BGR!!\n\n assert im.shape == (W,H,3), 'BGR!'\n im = im.astype('float32')\n views.append(im)\n views = np.asarray(views)\n\n '''\n print('Input: ', views.dtype)\n with tf.Session() as sess:\n with tf.device('/cpu:0'):\n views = augmentImages(views,\n horizontal_flip=True, vertical_flip=True, translate = 64, rotate=20, crop_probability=0, mixup=0)\n views = sess.run(views)\n\n print('Output: ', views.dtype)\n '''\n return views\n\n def subtract_mean(self):\n if not self.done_mean:\n mean_bgr = (104., 116., 122.)\n for i in range(3):\n self.views[:,:,:,i] -= mean_bgr[i]\n\n self.done_mean = True\n\n def crop_center(self, size=(227,227)):\n w, h = self.views.shape[1], self.views.shape[2]\n wn, hn = size\n\n left = int(w / 2 - wn / 2)\n top = int(h / 2 - hn / 2)\n right = left + wn\n bottom = top + hn\n #print(left,right,top,bottom)\n self.views = self.views[:, left:right, top:bottom, :]\n\n\nclass Dataset:\n def __init__(self, listfiles, labels, subtract_mean, V):\n self.listfiles = listfiles\n self.labels = labels\n self.shuffled = False\n self.subtract_mean = subtract_mean\n self.V = V\n\n print('dataset inited')\n print(' total size:', len(listfiles))\n\n def shuffle(self):\n z = list(zip(self.listfiles, self.labels))\n random.shuffle(z)\n self.listfiles, self.labels = [list(l) for l in zip(*z)]\n self.shuffled = True\n\n\n def batches(self, batch_size):\n for x,y in self._batches_fast(self.listfiles, batch_size):\n yield x,y\n\n def sample_batches(self, batch_size, n):\n listfiles = random.sample(self.listfiles, n)\n for x,y in self._batches_fast(listfiles, batch_size):\n yield x,y\n\n def _batches(self, listfiles, batch_size):\n n = len(listfiles)\n for i in range(0, n, batch_size):\n starttime = time.time()\n\n lists = listfiles[i : i+batch_size]\n x = np.zeros((batch_size, self.V, 227, 227, 3))\n y = np.zeros(batch_size)\n\n for j,l in enumerate(lists):\n s = Shape(l)\n s.crop_center()\n if self.subtract_mean:\n s.subtract_mean()\n x[j, ...] 
= s.views\n y[j] = s.label\n\n print('load batch time:', time.time()-starttime, 'sec')\n yield x, y\n\n def _load_shape(self, listfile):\n s = Shape(listfile)\n s.crop_center()\n if self.subtract_mean:\n s.subtract_mean()\n return s\n\n def _batches_fast(self, listfiles, batch_size):\n subtract_mean = self.subtract_mean\n n = len(listfiles)\n\n def load(listfiles, q, batch_size):\n n = len(listfiles)\n with ThreadPoolExecutor(max_workers=32) as pool:\n for i in range(0, n, batch_size):\n sub = listfiles[i: i + batch_size] if i < n-1 else [listfiles[-1]]\n shapes = list(pool.map(self._load_shape, sub))\n views = np.array([s.views for s in shapes])\n labels = np.array([s.label for s in shapes])\n q.put((views, labels))\n\n # indicate that I'm done\n q.put(None)\n\n # This must be larger than twice the batch_size\n q = Queue.Queue(maxsize=g_.INPUT_QUEUE_SIZE)\n\n # background loading Shapes process\n p = threading.Thread(target=load, args=(listfiles, q, batch_size))\n # daemon child is killed when parent exits\n p.daemon = True\n p.start()\n\n\n x = np.zeros((batch_size, self.V, 227, 227, 3))\n y = np.zeros(batch_size)\n\n for i in range(0, n, batch_size):\n starttime = time.time()\n\n item = q.get()\n if item is None:\n break\n x, y = item\n\n # print('load batch time:', time.time()-starttime, 'sec')\n yield x, y\n\n def size(self):\n \"\"\" size of listfiles (if splitted, only count 'train', not 'val')\"\"\"\n return len(self.listfiles)\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.zeros" ] ]
QAMP-Spring-2022-Transpiler-Hackathon/qiskit-terra
[ "aee0dc4d538991560f212411db92cde5f511f65b" ]
[ "test/python/quantum_info/operators/symplectic/test_clifford.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\"\"\"Tests for Clifford class.\"\"\"\n\nimport unittest\nfrom test import combine\nfrom ddt import ddt\n\nimport numpy as np\n\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.circuit import Gate, QuantumRegister, QuantumCircuit\nfrom qiskit.circuit.library import (\n IGate,\n XGate,\n YGate,\n ZGate,\n HGate,\n SGate,\n SdgGate,\n CXGate,\n CZGate,\n SwapGate,\n)\nfrom qiskit.quantum_info.operators import Clifford, Operator\nfrom qiskit.quantum_info.operators.symplectic.clifford_circuits import _append_circuit\nfrom qiskit.quantum_info.synthesis.clifford_decompose import (\n decompose_clifford_ag,\n decompose_clifford_bm,\n decompose_clifford_greedy,\n)\nfrom qiskit.quantum_info import random_clifford\n\n\nclass VGate(Gate):\n \"\"\"V Gate used in Clifford synthesis.\"\"\"\n\n def __init__(self):\n \"\"\"Create new V Gate.\"\"\"\n super().__init__(\"v\", 1, [])\n\n def _define(self):\n \"\"\"V Gate definition.\"\"\"\n q = QuantumRegister(1, \"q\")\n qc = QuantumCircuit(q)\n qc.data = [(SdgGate(), [q[0]], []), (HGate(), [q[0]], [])]\n self.definition = qc\n\n\nclass WGate(Gate):\n \"\"\"W Gate used in Clifford synthesis.\"\"\"\n\n def __init__(self):\n \"\"\"Create new W Gate.\"\"\"\n super().__init__(\"w\", 1, [])\n\n def _define(self):\n \"\"\"W Gate definition.\"\"\"\n q = QuantumRegister(1, \"q\")\n qc = QuantumCircuit(q)\n qc.data = [(VGate(), [q[0]], []), (VGate(), [q[0]], [])]\n self.definition = qc\n\n\ndef random_clifford_circuit(num_qubits, num_gates, gates=\"all\", seed=None):\n \"\"\"Generate a pseudo random Clifford circuit.\"\"\"\n\n if gates == \"all\":\n if num_qubits == 1:\n gates = [\"i\", \"x\", \"y\", \"z\", \"h\", \"s\", \"sdg\", \"v\", \"w\"]\n else:\n gates = [\"i\", \"x\", \"y\", \"z\", \"h\", \"s\", \"sdg\", \"v\", \"w\", \"cx\", \"cz\", \"swap\"]\n\n instructions = {\n \"i\": (IGate(), 1),\n \"x\": (XGate(), 1),\n \"y\": (YGate(), 1),\n \"z\": (ZGate(), 1),\n \"h\": (HGate(), 1),\n \"s\": (SGate(), 1),\n \"sdg\": (SdgGate(), 1),\n \"v\": (VGate(), 1),\n \"w\": (WGate(), 1),\n \"cx\": (CXGate(), 2),\n \"cz\": (CZGate(), 2),\n \"swap\": (SwapGate(), 2),\n }\n\n if isinstance(seed, np.random.Generator):\n rng = seed\n else:\n rng = np.random.default_rng(seed)\n\n samples = rng.choice(gates, num_gates)\n\n circ = QuantumCircuit(num_qubits)\n\n for name in samples:\n gate, nqargs = instructions[name]\n qargs = rng.choice(range(num_qubits), nqargs, replace=False).tolist()\n circ.append(gate, qargs)\n\n return circ\n\n\n@ddt\nclass TestCliffordGates(QiskitTestCase):\n \"\"\"Tests for clifford append gate functions.\"\"\"\n\n def test_append_1_qubit_gate(self):\n \"\"\"Tests for append of 1-qubit gates\"\"\"\n\n target_table = {\n \"i\": np.array([[[True, False], [False, True]]], dtype=bool),\n \"id\": np.array([[[True, False], [False, True]]], dtype=bool),\n \"iden\": np.array([[[True, False], [False, True]]], dtype=bool),\n \"x\": np.array([[[True, False], [False, True]]], dtype=bool),\n \"y\": 
np.array([[[True, False], [False, True]]], dtype=bool),\n \"z\": np.array([[[True, False], [False, True]]], dtype=bool),\n \"h\": np.array([[[False, True], [True, False]]], dtype=bool),\n \"s\": np.array([[[True, True], [False, True]]], dtype=bool),\n \"sdg\": np.array([[[True, True], [False, True]]], dtype=bool),\n \"sinv\": np.array([[[True, True], [False, True]]], dtype=bool),\n \"v\": np.array([[[True, True], [True, False]]], dtype=bool),\n \"w\": np.array([[[False, True], [True, True]]], dtype=bool),\n }\n\n target_phase = {\n \"i\": np.array([[False, False]], dtype=bool),\n \"id\": np.array([[False, False]], dtype=bool),\n \"iden\": np.array([[False, False]], dtype=bool),\n \"x\": np.array([[False, True]], dtype=bool),\n \"y\": np.array([[True, True]], dtype=bool),\n \"z\": np.array([[True, False]], dtype=bool),\n \"h\": np.array([[False, False]], dtype=bool),\n \"s\": np.array([[False, False]], dtype=bool),\n \"sdg\": np.array([[True, False]], dtype=bool),\n \"sinv\": np.array([[True, False]], dtype=bool),\n \"v\": np.array([[False, False]], dtype=bool),\n \"w\": np.array([[False, False]], dtype=bool),\n }\n\n target_stabilizer = {\n \"i\": \"+Z\",\n \"id\": \"+Z\",\n \"iden\": \"+Z\",\n \"x\": \"-Z\",\n \"y\": \"-Z\",\n \"z\": \"+Z\",\n \"h\": \"+X\",\n \"s\": \"+Z\",\n \"sdg\": \"+Z\",\n \"sinv\": \"+Z\",\n \"v\": \"+X\",\n \"w\": \"+Y\",\n }\n\n target_destabilizer = {\n \"i\": \"+X\",\n \"id\": \"+X\",\n \"iden\": \"+X\",\n \"x\": \"+X\",\n \"y\": \"-X\",\n \"z\": \"-X\",\n \"h\": \"+Z\",\n \"s\": \"+Y\",\n \"sdg\": \"-Y\",\n \"sinv\": \"-Y\",\n \"v\": \"+Y\",\n \"w\": \"+Z\",\n }\n\n for gate_name in (\"i\", \"id\", \"iden\", \"x\", \"y\", \"z\", \"h\", \"s\", \"sdg\", \"v\", \"w\"):\n with self.subTest(msg=\"append gate %s\" % gate_name):\n cliff = Clifford([[1, 0], [0, 1]])\n cliff = _append_circuit(cliff, gate_name, [0])\n value_table = cliff.table._array\n value_phase = cliff.table._phase\n value_stabilizer = cliff.stabilizer.to_labels()\n value_destabilizer = cliff.destabilizer.to_labels()\n self.assertTrue(np.all(np.array(value_table == target_table[gate_name])))\n self.assertTrue(np.all(np.array(value_phase == target_phase[gate_name])))\n self.assertTrue(\n np.all(np.array(value_stabilizer == [target_stabilizer[gate_name]]))\n )\n self.assertTrue(\n np.all(np.array(value_destabilizer == [target_destabilizer[gate_name]]))\n )\n\n def test_1_qubit_identity_relations(self):\n \"\"\"Tests identity relations for 1-qubit gates\"\"\"\n\n for gate_name in (\"x\", \"y\", \"z\", \"h\"):\n with self.subTest(msg=\"identity for gate %s\" % gate_name):\n cliff = Clifford([[1, 0], [0, 1]])\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, gate_name, [0])\n cliff = _append_circuit(cliff, gate_name, [0])\n self.assertEqual(cliff, cliff1)\n\n gates = [\"s\", \"s\", \"v\"]\n inv_gates = [\"sdg\", \"sinv\", \"w\"]\n\n for gate_name, inv_gate in zip(gates, inv_gates):\n with self.subTest(msg=\"identity for gate %s\" % gate_name):\n cliff = Clifford([[1, 0], [0, 1]])\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, gate_name, [0])\n cliff = _append_circuit(cliff, inv_gate, [0])\n self.assertEqual(cliff, cliff1)\n\n def test_1_qubit_mult_relations(self):\n \"\"\"Tests multiplicity relations for 1-qubit gates\"\"\"\n\n rels = [\n \"x * y = z\",\n \"x * z = y\",\n \"y * z = x\",\n \"s * s = z\",\n \"sdg * sdg = z\",\n \"sinv * sinv = z\",\n \"sdg * h = v\",\n \"h * s = w\",\n ]\n\n for rel in rels:\n with self.subTest(msg=\"relation %s\" % rel):\n split_rel = rel.split()\n cliff = 
Clifford([[1, 0], [0, 1]])\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, split_rel[0], [0])\n cliff = _append_circuit(cliff, split_rel[2], [0])\n cliff1 = _append_circuit(cliff1, split_rel[4], [0])\n self.assertEqual(cliff, cliff1)\n\n def test_1_qubit_conj_relations(self):\n \"\"\"Tests conjugation relations for 1-qubit gates\"\"\"\n\n rels = [\n \"h * x * h = z\",\n \"h * y * h = y\",\n \"s * x * sdg = y\",\n \"w * x * v = y\",\n \"w * y * v = z\",\n \"w * z * v = x\",\n ]\n\n for rel in rels:\n with self.subTest(msg=\"relation %s\" % rel):\n split_rel = rel.split()\n cliff = Clifford([[1, 0], [0, 1]])\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, split_rel[0], [0])\n cliff = _append_circuit(cliff, split_rel[2], [0])\n cliff = _append_circuit(cliff, split_rel[4], [0])\n cliff1 = _append_circuit(cliff1, split_rel[6], [0])\n self.assertEqual(cliff, cliff1)\n\n @combine(gate_name=(\"cx\", \"cz\", \"swap\"), qubits=([0, 1], [1, 0]))\n def test_append_2_qubit_gate(self, gate_name, qubits):\n \"\"\"Tests for append of 2-qubit gate {gate_name} {qubits}.\"\"\"\n\n targets_cliffords = {\n \"cx [0, 1]\": Clifford(\n [\n [True, True, False, False],\n [False, True, False, False],\n [False, False, True, False],\n [False, False, True, True],\n ]\n ),\n \"cx [1, 0]\": Clifford(\n [\n [True, False, False, False],\n [True, True, False, False],\n [False, False, True, True],\n [False, False, False, True],\n ]\n ),\n \"cz [0, 1]\": Clifford(\n [\n [True, False, False, True],\n [False, True, True, False],\n [False, False, True, False],\n [False, False, False, True],\n ]\n ),\n \"cz [1, 0]\": Clifford(\n [\n [True, False, False, True],\n [False, True, True, False],\n [False, False, True, False],\n [False, False, False, True],\n ]\n ),\n \"swap [0, 1]\": Clifford(\n [\n [False, True, False, False],\n [True, False, False, False],\n [False, False, False, True],\n [False, False, True, False],\n ]\n ),\n \"swap [1, 0]\": Clifford(\n [\n [False, True, False, False],\n [True, False, False, False],\n [False, False, False, True],\n [False, False, True, False],\n ]\n ),\n }\n\n gate_qubits = gate_name + \" \" + str(qubits)\n cliff = _append_circuit(Clifford(np.eye(4)), gate_name, qubits)\n target = targets_cliffords[gate_qubits]\n self.assertEqual(target, cliff)\n\n def test_2_qubit_identity_relations(self):\n \"\"\"Tests identity relations for 2-qubit gates\"\"\"\n\n for gate_name in (\"cx\", \"cz\", \"swap\"):\n for qubits in ([0, 1], [1, 0]):\n with self.subTest(msg=f\"append gate {gate_name} {qubits}\"):\n cliff = Clifford(np.eye(4))\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, gate_name, qubits)\n cliff = _append_circuit(cliff, gate_name, qubits)\n self.assertEqual(cliff, cliff1)\n\n def test_2_qubit_relations(self):\n \"\"\"Tests relations for 2-qubit gates\"\"\"\n\n with self.subTest(msg=\"relation between cx, h and cz\"):\n cliff = Clifford(np.eye(4))\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, \"h\", [1])\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"h\", [1])\n cliff = _append_circuit(cliff, \"cz\", [0, 1])\n self.assertEqual(cliff, cliff1)\n\n with self.subTest(msg=\"relation between cx and swap\"):\n cliff = Clifford(np.eye(4))\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"cx\", [1, 0])\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"swap\", [0, 1])\n self.assertEqual(cliff, cliff1)\n\n with self.subTest(msg=\"relation between 
cx and x\"):\n cliff = Clifford(np.eye(4))\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"x\", [0])\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"x\", [0])\n cliff = _append_circuit(cliff, \"x\", [1])\n self.assertEqual(cliff, cliff1)\n\n with self.subTest(msg=\"relation between cx and z\"):\n cliff = Clifford(np.eye(4))\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"z\", [1])\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"z\", [0])\n cliff = _append_circuit(cliff, \"z\", [1])\n self.assertEqual(cliff, cliff1)\n\n with self.subTest(msg=\"relation between cx and s\"):\n cliff = Clifford(np.eye(4))\n cliff1 = cliff.copy()\n cliff = _append_circuit(cliff, \"cx\", [1, 0])\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"s\", [1])\n cliff = _append_circuit(cliff, \"cx\", [0, 1])\n cliff = _append_circuit(cliff, \"cx\", [1, 0])\n cliff = _append_circuit(cliff, \"sdg\", [0])\n self.assertEqual(cliff, cliff1)\n\n def test_barrier_delay_sim(self):\n \"\"\"Test barrier and delay instructions can be simulated\"\"\"\n target_circ = QuantumCircuit(2)\n target_circ.h(0)\n target_circ.cx(0, 1)\n target = Clifford(target_circ)\n\n circ = QuantumCircuit(2)\n circ.h(0)\n circ.delay(100, 0)\n circ.barrier([0, 1])\n circ.cx(0, 1)\n value = Clifford(circ)\n self.assertEqual(value, target)\n\n\n@ddt\nclass TestCliffordSynthesis(QiskitTestCase):\n \"\"\"Test Clifford synthesis methods.\"\"\"\n\n def _cliffords_1q(self):\n clifford_dicts = [\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"-Z\"]},\n ]\n return [Clifford.from_dict(i) for i in clifford_dicts]\n\n def test_decompose_1q(self):\n \"\"\"Test synthesis for all 1-qubit Cliffords\"\"\"\n for cliff in self._cliffords_1q():\n with self.subTest(msg=f\"Test circuit {cliff}\"):\n target = cliff\n value = Clifford(cliff.to_circuit())\n self.assertEqual(target, value)\n\n @combine(num_qubits=[2, 3])\n def test_decompose_2q_bm(self, num_qubits):\n \"\"\"Test B&M synthesis for set of {num_qubits}-qubit Cliffords\"\"\"\n rng = np.random.default_rng(1234)\n samples = 50\n for _ in 
range(samples):\n circ = random_clifford_circuit(num_qubits, 5 * num_qubits, seed=rng)\n target = Clifford(circ)\n value = Clifford(decompose_clifford_bm(target))\n self.assertEqual(value, target)\n\n @combine(num_qubits=[2, 3, 4, 5])\n def test_decompose_2q_ag(self, num_qubits):\n \"\"\"Test A&G synthesis for set of {num_qubits}-qubit Cliffords\"\"\"\n rng = np.random.default_rng(1234)\n samples = 50\n for _ in range(samples):\n circ = random_clifford_circuit(num_qubits, 5 * num_qubits, seed=rng)\n target = Clifford(circ)\n value = Clifford(decompose_clifford_ag(target))\n self.assertEqual(value, target)\n\n @combine(num_qubits=[1, 2, 3, 4, 5])\n def test_decompose_2q_greedy(self, num_qubits):\n \"\"\"Test greedy synthesis for set of {num_qubits}-qubit Cliffords\"\"\"\n rng = np.random.default_rng(1234)\n samples = 50\n for _ in range(samples):\n circ = random_clifford_circuit(num_qubits, 5 * num_qubits, seed=rng)\n target = Clifford(circ)\n value = Clifford(decompose_clifford_greedy(target))\n self.assertEqual(value, target)\n\n\n@ddt\nclass TestCliffordDecomposition(QiskitTestCase):\n \"\"\"Test Clifford decompositions.\"\"\"\n\n @combine(\n gates=[\n [\"h\", \"s\"],\n [\"h\", \"s\", \"i\", \"x\", \"y\", \"z\"],\n [\"h\", \"s\", \"sdg\"],\n [\"h\", \"s\", \"v\"],\n [\"h\", \"s\", \"w\"],\n [\"h\", \"s\", \"sdg\", \"i\", \"x\", \"y\", \"z\", \"v\", \"w\"],\n ]\n )\n def test_to_operator_1qubit_gates(self, gates):\n \"\"\"Test 1-qubit circuit with gates {gates}\"\"\"\n samples = 10\n num_gates = 10\n seed = 100\n for i in range(samples):\n circ = random_clifford_circuit(1, num_gates, gates=gates, seed=seed + i)\n value = Clifford(circ).to_operator()\n target = Operator(circ)\n self.assertTrue(target.equiv(value))\n\n @combine(\n gates=[\n [\"cx\"],\n [\"cz\"],\n [\"swap\"],\n [\"cx\", \"cz\"],\n [\"cx\", \"swap\"],\n [\"cz\", \"swap\"],\n [\"cx\", \"cz\", \"swap\"],\n ]\n )\n def test_to_operator_2qubit_gates(self, gates):\n \"\"\"Test 2-qubit circuit with gates {gates}\"\"\"\n samples = 10\n num_gates = 10\n seed = 200\n for i in range(samples):\n circ = random_clifford_circuit(2, num_gates, gates=gates, seed=seed + i)\n value = Clifford(circ).to_operator()\n target = Operator(circ)\n self.assertTrue(target.equiv(value))\n\n @combine(\n gates=[[\"h\", \"s\", \"cx\"], [\"h\", \"s\", \"cz\"], [\"h\", \"s\", \"swap\"], \"all\"], num_qubits=[2, 3, 4]\n )\n def test_to_operator_nqubit_gates(self, gates, num_qubits):\n \"\"\"Test {num_qubits}-qubit circuit with gates {gates}\"\"\"\n samples = 10\n num_gates = 20\n seed = 300\n for i in range(samples):\n circ = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + i)\n value = Clifford(circ).to_operator()\n target = Operator(circ)\n self.assertTrue(target.equiv(value))\n\n @combine(num_qubits=[1, 2, 3])\n def test_to_matrix(self, num_qubits):\n \"\"\"Test to_matrix method\"\"\"\n samples = 10\n num_gates = 10\n seed = 333\n gates = \"all\"\n for i in range(samples):\n circ = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + i)\n mat = Clifford(circ).to_matrix()\n self.assertIsInstance(mat, np.ndarray)\n self.assertEqual(mat.shape, 2 * (2**num_qubits,))\n value = Operator(mat)\n target = Operator(circ)\n self.assertTrue(value.equiv(target))\n\n @combine(num_qubits=[1, 2, 3, 4, 5])\n def test_to_circuit(self, num_qubits):\n \"\"\"Test to_circuit method\"\"\"\n samples = 10\n num_gates = 10\n seed = 700\n gates = \"all\"\n for i in range(samples):\n circ = random_clifford_circuit(num_qubits, num_gates, 
gates=gates, seed=seed + i)\n target = Clifford(circ)\n decomp = target.to_circuit()\n self.assertIsInstance(decomp, QuantumCircuit)\n self.assertEqual(decomp.num_qubits, circ.num_qubits)\n # Convert back to clifford and check it is the same\n self.assertEqual(Clifford(decomp), target)\n\n @combine(num_qubits=[1, 2, 3, 4, 5])\n def test_to_instruction(self, num_qubits):\n \"\"\"Test to_instruction method\"\"\"\n samples = 10\n num_gates = 10\n seed = 800\n gates = \"all\"\n for i in range(samples):\n circ = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + i)\n target = Clifford(circ)\n decomp = target.to_instruction()\n self.assertIsInstance(decomp, Gate)\n self.assertEqual(decomp.num_qubits, circ.num_qubits)\n # Convert back to clifford and check it is the same\n self.assertEqual(Clifford(decomp), target)\n\n\n@ddt\nclass TestCliffordOperators(QiskitTestCase):\n \"\"\"Test Clifford operator class methods.\"\"\"\n\n @combine(num_qubits=[1, 2, 3])\n def test_is_unitary(self, num_qubits):\n \"\"\"Test is_unitary method\"\"\"\n samples = 10\n num_gates = 10\n seed = 700\n gates = \"all\"\n for i in range(samples):\n circ = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + i)\n value = Clifford(circ).is_unitary()\n self.assertTrue(value)\n # tests a false clifford\n cliff = Clifford([[0, 0], [0, 1]], validate=False)\n value = cliff.is_unitary()\n self.assertFalse(value)\n\n @combine(num_qubits=[1, 2, 3])\n def test_conjugate(self, num_qubits):\n \"\"\"Test conjugate method\"\"\"\n samples = 10\n num_gates = 10\n seed = 400\n gates = \"all\"\n for i in range(samples):\n circ = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + i)\n value = Clifford(circ).conjugate().to_operator()\n target = Operator(circ).conjugate()\n self.assertTrue(target.equiv(value))\n\n @combine(num_qubits=[1, 2, 3])\n def test_transpose(self, num_qubits):\n \"\"\"Test transpose method\"\"\"\n samples = 10\n num_gates = 1\n seed = 500\n gates = \"all\"\n for i in range(samples):\n circ = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + i)\n value = Clifford(circ).transpose().to_operator()\n target = Operator(circ).transpose()\n self.assertTrue(target.equiv(value))\n\n @combine(num_qubits=[1, 2, 3])\n def test_compose_method(self, num_qubits):\n \"\"\"Test compose method\"\"\"\n samples = 10\n num_gates = 10\n seed = 600\n gates = \"all\"\n for i in range(samples):\n circ1 = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + i)\n circ2 = random_clifford_circuit(\n num_qubits, num_gates, gates=gates, seed=seed + samples + i\n )\n cliff1 = Clifford(circ1)\n cliff2 = Clifford(circ2)\n value = cliff1.compose(cliff2)\n target = Clifford(circ1.compose(circ2))\n self.assertEqual(target, value)\n\n @combine(num_qubits=[1, 2, 3])\n def test_dot_method(self, num_qubits):\n \"\"\"Test dot method\"\"\"\n samples = 10\n num_gates = 10\n seed = 600\n gates = \"all\"\n for i in range(samples):\n circ1 = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + i)\n circ2 = random_clifford_circuit(\n num_qubits, num_gates, gates=gates, seed=seed + samples + i\n )\n cliff1 = Clifford(circ1)\n cliff2 = Clifford(circ2)\n value = cliff1.dot(cliff2)\n target = Clifford(circ2.compose(circ1))\n self.assertEqual(target, value)\n\n @combine(num_qubits_1=[1, 2, 3], num_qubits_2=[1, 2, 3])\n def test_tensor_method(self, num_qubits_1, num_qubits_2):\n \"\"\"Test tensor method\"\"\"\n samples = 5\n num_gates = 10\n seed = 800\n 
gates = \"all\"\n for i in range(samples):\n circ1 = random_clifford_circuit(num_qubits_1, num_gates, gates=gates, seed=seed + i)\n circ2 = random_clifford_circuit(\n num_qubits_2, num_gates, gates=gates, seed=seed + samples + i\n )\n cliff1 = Clifford(circ1)\n cliff2 = Clifford(circ2)\n value = cliff1.tensor(cliff2)\n circ = QuantumCircuit(num_qubits_1 + num_qubits_2)\n circ.append(circ2, range(num_qubits_2))\n circ.append(circ1, range(num_qubits_2, num_qubits_1 + num_qubits_2))\n target = Clifford(circ)\n self.assertEqual(target, value)\n\n @combine(num_qubits_1=[1, 2, 3], num_qubits_2=[1, 2, 3])\n def test_expand_method(self, num_qubits_1, num_qubits_2):\n \"\"\"Test expand method\"\"\"\n samples = 5\n num_gates = 10\n seed = 800\n gates = \"all\"\n for i in range(samples):\n circ1 = random_clifford_circuit(num_qubits_1, num_gates, gates=gates, seed=seed + i)\n circ2 = random_clifford_circuit(\n num_qubits_2, num_gates, gates=gates, seed=seed + samples + i\n )\n cliff1 = Clifford(circ1)\n cliff2 = Clifford(circ2)\n value = cliff1.expand(cliff2)\n circ = QuantumCircuit(num_qubits_1 + num_qubits_2)\n circ.append(circ1, range(num_qubits_1))\n circ.append(circ2, range(num_qubits_1, num_qubits_1 + num_qubits_2))\n target = Clifford(circ)\n self.assertEqual(target, value)\n\n @combine(num_qubits_1=[4, 5, 6], num_qubits_2=[1, 2, 3])\n def test_compose_subsystem(self, num_qubits_1, num_qubits_2):\n \"\"\"Test compose method of subsystems\"\"\"\n samples = 10\n num_gates = 10\n seed = 600\n gates = \"all\"\n for i in range(samples):\n circ1 = random_clifford_circuit(num_qubits_1, num_gates, gates=gates, seed=seed + i)\n circ2 = random_clifford_circuit(\n num_qubits_2, num_gates, gates=gates, seed=seed + samples + i\n )\n qargs = sorted(np.random.choice(range(num_qubits_1), num_qubits_2, replace=False))\n circ = circ1.copy()\n circ.append(circ2.to_instruction(), qargs)\n value = Clifford(circ1).compose(Clifford(circ2), qargs)\n target = Clifford(circ)\n self.assertEqual(target, value)\n\n @combine(num_qubits_1=[4, 5, 6], num_qubits_2=[1, 2, 3])\n def test_dot_subsystem(self, num_qubits_1, num_qubits_2):\n \"\"\"Test dot method of subsystems\"\"\"\n samples = 10\n num_gates = 10\n seed = 600\n gates = \"all\"\n for i in range(samples):\n circ1 = random_clifford_circuit(num_qubits_1, num_gates, gates=gates, seed=seed + i)\n circ2 = random_clifford_circuit(\n num_qubits_2, num_gates, gates=gates, seed=seed + samples + i\n )\n qargs = sorted(np.random.choice(range(num_qubits_1), num_qubits_2, replace=False))\n circ = QuantumCircuit(num_qubits_1)\n circ.append(circ2.to_instruction(), qargs)\n circ.append(circ1.to_instruction(), range(num_qubits_1))\n value = Clifford(circ1).dot(Clifford(circ2), qargs)\n target = Clifford(circ)\n self.assertEqual(target, value)\n\n def test_to_dict(self):\n \"\"\"Test to_dict method\"\"\"\n\n with self.subTest(msg=\"Identity\"):\n cliff = Clifford(np.eye(8))\n value = cliff.to_dict()\n\n keys_value = set(value.keys())\n keys_target = {\"destabilizer\", \"stabilizer\"}\n self.assertEqual(keys_value, keys_target)\n\n stabilizer_value = set(value[\"stabilizer\"])\n stabilizer_target = {\"+IIIZ\", \"+IIZI\", \"+IZII\", \"+ZIII\"}\n self.assertEqual(stabilizer_value, stabilizer_target)\n\n destabilizer_value = set(value[\"destabilizer\"])\n destabilizer_target = {\"+IIIX\", \"+IIXI\", \"+IXII\", \"+XIII\"}\n self.assertEqual(destabilizer_value, destabilizer_target)\n\n with self.subTest(msg=\"bell\"):\n qc = QuantumCircuit(2)\n qc.h(0)\n qc.cx(0, 1)\n cliff = Clifford(qc)\n 
value = cliff.to_dict()\n\n keys_value = set(value.keys())\n keys_target = {\"destabilizer\", \"stabilizer\"}\n self.assertEqual(keys_value, keys_target)\n\n stabilizer_value = set(value[\"stabilizer\"])\n stabilizer_target = {\"+XX\", \"+ZZ\"}\n self.assertEqual(stabilizer_value, stabilizer_target)\n\n destabilizer_value = set(value[\"destabilizer\"])\n destabilizer_target = {\"+IZ\", \"+XI\"}\n self.assertEqual(destabilizer_value, destabilizer_target)\n\n def test_from_dict(self):\n \"\"\"Test from_dict method\"\"\"\n\n with self.subTest(msg=\"test raises not unitary\"):\n cliff_dict = {\"stabilizer\": [\"+XX\", \"+ZZ\"], \"destabilizer\": [\"+IZ\", \"+IY\"]}\n self.assertRaises(QiskitError, Clifford.from_dict, cliff_dict)\n\n with self.subTest(msg=\"test raises wrong shape\"):\n cliff_dict = {\n \"stabilizer\": [\"+XX\", \"+ZZ\", \"+YY\"],\n \"destabilizer\": [\"+IZ\", \"+XI\", \"+IY\"],\n }\n self.assertRaises(QiskitError, Clifford.from_dict, cliff_dict)\n\n @combine(num_qubits=[1, 2, 3, 4, 5])\n def test_dict_round_trip(self, num_qubits):\n \"\"\"Test round trip conversion to and from dict\"\"\"\n num_gates = 10\n seed = 655\n gates = \"all\"\n circ = random_clifford_circuit(num_qubits, num_gates, gates=gates, seed=seed + num_qubits)\n target = Clifford(circ)\n value = Clifford.from_dict(target.to_dict())\n self.assertEqual(value, target)\n\n def test_from_label(self):\n \"\"\"Test from_label method\"\"\"\n label = \"IXYZHS\"\n CI = Clifford(IGate())\n CX = Clifford(XGate())\n CY = Clifford(YGate())\n CZ = Clifford(ZGate())\n CH = Clifford(HGate())\n CS = Clifford(SGate())\n target = CI.tensor(CX).tensor(CY).tensor(CZ).tensor(CH).tensor(CS)\n self.assertEqual(Clifford.from_label(label), target)\n\n @combine(num_qubits=[1, 2, 3, 4, 5])\n def test_instruction_name(self, num_qubits):\n \"\"\"Test to verify the correct clifford name is maintained\n after converting to instruction\"\"\"\n clifford = random_clifford(num_qubits, seed=777)\n self.assertEqual(clifford.to_instruction().name, str(clifford))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.random.default_rng", "numpy.eye" ] ]
brandonkrull/matminer
[ "17e52ea03080c9517681774b45fda800f3329aeb" ]
[ "matminer/utils/data.py" ]
[ "from __future__ import division, unicode_literals, print_function\n\n\"\"\"\nUtility classes for retrieving elemental properties. Provides\na uniform interface to several different elemental property resources\nincluding ``pymatgen`` and ``Magpie``.\n\"\"\"\n\nimport os\nimport json\nimport six\nimport abc\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\n\nfrom pymatgen import Element\nfrom pymatgen.core.periodic_table import _pt_data\n\n__author__ = 'Kiran Mathew, Jiming Chen, Logan Ward, Anubhav Jain, Alex Dunn'\n\nmodule_dir = os.path.dirname(os.path.abspath(__file__))\n\n\nclass AbstractData(six.with_metaclass(abc.ABCMeta)):\n \"\"\"Abstract class for retrieving elemental properties\n\n All classes must implement the `get_elemental_property` operation. These operations\n should return scalar values (ideally floats) and `nan` if a property does not exist\"\"\"\n\n @abc.abstractmethod\n def get_elemental_property(self, elem, property_name):\n \"\"\"Get a certain elemental property for a certain element.\n\n Args:\n elem - (Element) element to be assessed\n property_name - (str) property to be retreived\n Returns:\n float, property of that element\n \"\"\"\n pass\n\n def get_elemental_properties(self, elems, property_name):\n \"\"\"Get elemental properties for a list of elements\n\n Args:\n elems - ([Element]) list of elements\n property_name - (str) property to be retrieved\n Returns:\n [float], properties of elements\n \"\"\"\n return [self.get_elemental_property(e, property_name) for e in elems]\n\n\nclass OxidationStatesMixin(six.with_metaclass(abc.ABCMeta)):\n \"\"\"Abstract class interface for retrieving the oxidation states\n of each element\"\"\"\n\n @abc.abstractmethod\n def get_oxidation_states(self, elem):\n \"\"\"Retrieve the possible oxidation states of an element\n\n Args:\n elem - (Element), Target element\n Returns:\n [int] - oxidation states\n \"\"\"\n pass\n\n\nclass OxidationStateDependentData(AbstractData):\n \"\"\"Abstract class that also includes oxidation-state-dependent properties\"\"\"\n\n @abc.abstractmethod\n def get_charge_dependent_property(self, element, charge, property_name):\n \"\"\"Retrieve a oxidation-state dependent elemental property\n\n Args:\n element - (Element), Target element\n charge - (int), Oxidation state\n property_name - (string), name of property\n Return:\n (float) - Value of property\n \"\"\"\n pass\n\n def get_charge_dependent_property_from_specie(self, specie, property_name):\n \"\"\"Retrieve a oxidation-state dependent elemental property\n\n Args:\n specie - (Specie), Specie of interest\n property_name - (string), name of property\n Return:\n (float) - Value of property\n \"\"\"\n\n return self.get_charge_dependent_property(specie.element,\n specie.oxi_state,\n property_name)\n\n\nclass CohesiveEnergyData(AbstractData):\n \"\"\"Get the cohesive energy of an element.\n\n Data is extracted from KnowledgeDoor Cohesive Energy Handbook online\n (http://www.knowledgedoor.com/2/elements_handbook/cohesive_energy.html),\n which in turn got the data from Introduction to Solid State Physics,\n 8th Edition, by Charles Kittel (ISBN 978-0-471-41526-8), 2005.\n \"\"\"\n\n def __init__(self):\n # Load elemental cohesive energy data from json file\n with open(os.path.join(module_dir, 'data_files',\n 'cohesive_energies.json'), 'r') as f:\n self.cohesive_energy_data = json.load(f)\n\n def get_elemental_property(self, elem, property_name='cohesive energy'):\n \"\"\"\n Args:\n elem: (Element) Element of interest\n property_name (str): 
unused, always returns cohesive energy\n\n Returns:\n (float): cohesive energy of the element\n \"\"\"\n return self.cohesive_energy_data[elem]\n\n\nclass DemlData(OxidationStateDependentData, OxidationStatesMixin):\n \"\"\"\n Class to get data from Deml data file. See also: A.M. Deml,\n R. O'Hayre, C. Wolverton, V. Stevanovic, Predicting density functional\n theory total energies and enthalpies of formation of metal-nonmetal\n compounds by linear regression, Phys. Rev. B - Condens. Matter Mater. Phys.\n 93 (2016).\n \"\"\"\n\n def __init__(self):\n from matminer.utils.data_files.deml_elementdata import properties\n self.all_props = properties\n self.available_props = list(self.all_props.keys()) + \\\n [\"formal_charge\", \"valence_s\", \"valence_p\",\n \"valence_d\", \"first_ioniz\", \"total_ioniz\"]\n\n # Compute the FERE correction energy\n fere_corr = {}\n for k, v in self.all_props[\"GGAU_Etot\"].items():\n fere_corr[k] = self.all_props[\"mus_fere\"][k] - v\n self.all_props[\"FERE correction\"] = fere_corr\n\n # List out the available charge-dependent properties\n self.charge_dependent_properties = [\"xtal_field_split\", \"magn_moment\",\n \"so_coupling\", \"sat_magn\"]\n\n def get_elemental_property(self, elem, property_name):\n if \"valence\" in property_name:\n valence_dict = self.all_props[\"valence_e\"][\n self.all_props[\"col_num\"][elem.symbol]]\n if property_name[-1] in [\"s\", \"p\", \"d\"]:\n # Return one of the shells\n return valence_dict[property_name[-1]]\n else:\n return sum(valence_dict.values())\n elif property_name == \"first_ioniz\":\n return self.all_props[\"ionization_en\"][elem.symbol][0]\n else:\n return self.all_props[property_name].get(elem.symbol, float(\"NaN\"))\n\n def get_oxidation_states(self, elem):\n return self.all_props[\"charge_states\"][elem.symbol]\n\n def get_charge_dependent_property(self, element, charge, property_name):\n if property_name == \"total_ioniz\":\n if charge < 0:\n raise ValueError(\n \"total ionization energy only defined for charge > 0\")\n return sum(self.all_props[\"ionization_en\"][element.symbol][:charge])\n else:\n return self.all_props[property_name].get(element.symbol, {}).get(\n charge, np.nan)\n\n\nclass MagpieData(AbstractData, OxidationStatesMixin):\n \"\"\"\n Class to get data from Magpie files. See also:\n L. Ward, A. Agrawal, A. Choudhary, C. Wolverton, A general-purpose machine\n learning framework for predicting properties of inorganic materials,\n Npj Comput. Mater. 
2 (2016) 16028.\n \"\"\"\n\n def __init__(self):\n self.all_elemental_props = dict()\n available_props = []\n self.data_dir = os.path.join(module_dir, \"data_files\",\n 'magpie_elementdata')\n\n # Make a list of available properties\n for datafile in glob(os.path.join(self.data_dir, \"*.table\")):\n available_props.append(\n os.path.basename(datafile).replace('.table', ''))\n\n # parse and store elemental properties\n for descriptor_name in available_props:\n with open(os.path.join(self.data_dir,\n '{}.table'.format(descriptor_name)),\n 'r') as f:\n self.all_elemental_props[descriptor_name] = dict()\n lines = f.readlines()\n for atomic_no in range(1, len(_pt_data) + 1): # max Z=103\n try:\n if descriptor_name in [\"OxidationStates\"]:\n prop_value = [float(i) for i in\n lines[atomic_no - 1].split()]\n else:\n prop_value = float(lines[atomic_no - 1])\n except (ValueError, IndexError):\n prop_value = float(\"NaN\")\n self.all_elemental_props[descriptor_name][\n Element.from_Z(atomic_no).symbol] = prop_value\n\n def get_elemental_property(self, elem, property_name):\n return self.all_elemental_props[property_name][elem.symbol]\n\n def get_oxidation_states(self, elem):\n return self.all_elemental_props[\"OxidationStates\"][elem.symbol]\n\n\nclass PymatgenData(OxidationStateDependentData, OxidationStatesMixin):\n \"\"\"\n Class to get data from pymatgen. See also:\n S.P. Ong, W.D. Richards, A. Jain, G. Hautier, M. Kocher, S. Cholia, et al.,\n Python Materials Genomics (pymatgen): A robust, open-source python library\n for materials analysis, Comput. Mater. Sci. 68 (2013) 314-319.\n \"\"\"\n\n def __init__(self, use_common_oxi_states=True):\n self.use_common_oxi_states = use_common_oxi_states\n\n def get_elemental_property(self, elem, property_name):\n if property_name == \"block\":\n block_key = {\"s\": 1.0, \"p\": 2.0, \"d\": 3.0, \"f\": 3.0}\n return block_key[getattr(elem, property_name)]\n else:\n value = getattr(elem, property_name)\n return np.nan if value is None else value\n\n def get_oxidation_states(self, elem):\n \"\"\"Get the oxidation states of an element\n\n Args:\n elem - (Element) target element\n common - (boolean), whether to return only the common oxidation states,\n or all known oxidation states\n Returns:\n [int] list of oxidation states\n \"\"\"\n return elem.common_oxidation_states if self.use_common_oxi_states \\\n else elem.oxidation_states\n\n def get_charge_dependent_property(self, element, charge, property_name):\n return getattr(element, property_name)[charge]\n\n\nclass MixingEnthalpy:\n \"\"\"\n Values of :math:`\\Delta H^{max}_{AB}` for different pairs of elements.\n\n Based on the Miedema model. Tabulated by:\n A. Takeuchi, A. Inoue, Classification of Bulk Metallic Glasses by Atomic\n Size Difference, Heat of Mixing and Period of Constituent Elements and\n Its Application to Characterization of the Main Alloying Element.\n Mater. Trans. 
46, 2817–2829 (2005).\n\n Attributes:\n valid_element_list ([Element]): A list of elements for which the\n mixing enthalpy parameters are defined (although no guarantees\n are provided that all combinations of this list will be available).\n \"\"\"\n\n def __init__(self):\n mixing_dataset = pd.read_csv(os.path.join(module_dir, 'data_files',\n 'MiedemaLiquidDeltaHf.tsv'),\n delim_whitespace=True)\n self.mixing_data = {}\n for a, b, dHf in mixing_dataset.itertuples(index=False):\n key = tuple(sorted((a, b)))\n self.mixing_data[key] = dHf\n valid_elements = [\n \"Dy\", \"Mn\", \"Y\", \"Nd\", \"Ag\", \"Cs\", \"Tm\", \"Pd\", \"Sn\", \"Rh\", \"Pr\",\n \"Er\", \"K\", \"In\", \"Tb\", \"Rb\", \"H\", \"N\", \"Ni\", \"Hg\", \"Ca\", \"Mo\", \"Li\",\n \"Th\", \"U\", \"At\", \"Ga\", \"La\", \"Ru\", \"Lu\", \"Eu\", \"Si\", \"B\", \"Zr\",\n \"Ce\", \"Pm\", \"Ge\", \"Sm\", \"Ta\", \"Ti\", \"Po\", \"Sc\", \"Mg\", \"Sr\", \"P\",\n \"C\", \"Ir\", \"Pa\", \"V\", \"Zn\", \"Sb\", \"Na\", \"W\", \"Re\", \"Tl\", \"Pt\", \"Gd\",\n \"Cr\", \"Co\", \"Ba\", \"Os\", \"Hf\", \"Pb\", \"Cu\", \"Tc\", \"Al\", \"As\", \"Ho\",\n \"Yb\", \"Au\", \"Be\", \"Nb\", \"Cd\", \"Fe\", \"Bi\"]\n self.valid_element_list = [Element(e) for e in valid_elements]\n\n def get_mixing_enthalpy(self, elemA, elemB):\n \"\"\"\n Get the mixing enthalpy between different elements\n\n Args:\n elemA (Element): An element\n elemB (Element): Second element\n Returns:\n (float) mixing enthalpy, nan if pair is not in a table\n \"\"\"\n\n key = tuple(sorted((elemA.symbol, elemB.symbol)))\n return self.mixing_data.get(key, np.nan)\n\n\nclass MatscholarElementData(AbstractData):\n \"\"\"\n Class to get word embedding vectors of elements. These word embeddings were\n generated using NLP + Neural Network techniques on more than 3 million\n scientific abstracts.\n\n #TODO: add citation (expected mid-2019).\n \"\"\"\n\n def __init__(self):\n dfile = os.path.join(module_dir,\n \"data_files/matscholar_els.json\")\n with open(dfile, \"r\") as fp:\n embeddings = json.load(fp)\n self.prop_names = [\"embedding {}\".format(i) for i in range(1, 201)]\n all_element_data = {}\n for el, embedding in embeddings.items():\n all_element_data[el] = dict(zip(self.prop_names, embedding))\n self.all_element_data = all_element_data\n\n def get_elemental_property(self, elem, property_name):\n return self.all_element_data[str(elem)][property_name]\n\n\nclass MEGNetElementData(AbstractData):\n \"\"\"\n Class to get neural network embeddings of elements. These embeddings were\n generated using the Materials Graph Network (MEGNet) developed by the\n MaterialsVirtualLab at U.C. 
San Diego and described in the following\n preprint:\n\n https://arxiv.org/abs/1812.05055\n\n The code for MEGNet can be found at:\n https://github.com/materialsvirtuallab/megnet\n\n #TODO: add publication reference when MEGNet is published\n\n The embeddings were generated by training the MEGNet Graph Network on\n 60,000 structures from the Materials Project for predicting formation\n energy, and may be an effective way of applying transfer learning to\n smaller datasets using crystal-graph-based networks.\n \"\"\"\n\n def __init__(self):\n dfile = os.path.join(module_dir,\n \"data_files/megnet_elemental_embedding.json\")\n self._dummy = \"Dummy\"\n with open(dfile, \"r\") as fp:\n embeddings = json.load(fp)\n self.prop_names = [\"embedding {}\".format(i) for i in range(1, 17)]\n self.all_element_data = {}\n for i in range(95):\n embedding_dict = dict(zip(self.prop_names, embeddings[i]))\n if i == 0:\n self.all_element_data[self._dummy] = embedding_dict\n else:\n self.all_element_data[str(Element.from_Z(i))] = embedding_dict\n\n def get_elemental_property(self, elem, property_name):\n estr = str(elem)\n if estr not in self.all_element_data.keys():\n estr = self._dummy\n return self.all_element_data[estr][property_name]\n\n\nclass IUCrBondValenceData:\n \"\"\"Get empirical bond valence parameters.\n\n Data come from International Union of Crystallography 2016 tables.\n (https://www.iucr.org/resources/data/datasets/bond-valence-parameters)\n Both the raw source CIF and cleaned csv file are made accessible here.\n Within the source CIF, there are citations for every set of parameters.\n\n The copyright notice and disclaimer are reproduced below\n #***************************************************************\n # COPYRIGHT NOTICE\n # This table may be used and distributed without fee for\n # non-profit purposes providing\n # 1) that this copyright notice is included and\n # 2) no fee is charged for the table and\n # 3) details of any changes made in this list by anyone other than\n # the copyright owner are suitably noted in the _audit_update record\n # Please consult the copyright owner regarding any other uses.\n #\n # The copyright is owned by I. David Brown, Brockhouse Institute for\n # Materials Research, McMaster University, Hamilton, Ontario Canada.\n # [email protected]\n #\n #*****************************DISCLAIMER************************\n #\n # The values reported here are taken from the literature and\n # other sources and the author does not warrant their correctness\n # nor accept any responsibility for errors. Users are advised to\n # consult the primary sources.\n #\n #***************************************************************\n \"\"\"\n\n def __init__(self, interpolate_soft=True):\n \"\"\"\n Load bond valence parameters as pandas dataframe.\n\n If interpolate_soft is True, fill in some missing values\n for anions such as I, Br, N, S, Se, etc. with the assumption\n that bond valence parameters of such anions don't depend on\n cation oxidation state. This assumption comes from Brese and O'Keeffe,\n (1991), Acta Cryst. B47, 194, which states \"with less electronegative\n anions, ... 
R is not very different for different oxidation states in\n general.\" In the original data source file, only one set of parameters\n is usually provided for those less electronegative anions in a 9+\n oxidation state, indicating they can be used with all oxidation states.\n \"\"\"\n filepath = os.path.join(\n module_dir,\n \"data_files\",\n \"bvparm2016.cif\")\n self.params = pd.read_csv(filepath, sep='\\s+',\n header=None,\n names=['Atom1', 'Atom1_valence',\n 'Atom2', 'Atom2_valence',\n 'Ro', 'B',\n 'ref_id', 'details'],\n skiprows=172,\n skipfooter=1,\n index_col=False,\n engine=\"python\")\n if interpolate_soft:\n self.params = self.interpolate_soft_anions()\n\n def interpolate_soft_anions(self):\n \"\"\"Fill in missing parameters for oxidation states of soft anions.\"\"\"\n high_electroneg = '|'.join(['O', 'Cl', 'F'])\n subset = self.params.loc[(self.params['Atom1_valence'] == 9) & (~self.params['Atom2'].str.contains(high_electroneg))]\n cation_subset = subset['Atom1'].unique()\n data = []\n for cation in cation_subset:\n anions = subset.loc[subset['Atom1'] == cation]['Atom2'].unique()\n for anion in anions:\n an_val, Ro, b, ref_id = subset.loc[(subset['Atom1'] == cation)\n & (subset['Atom2']==anion)][['Atom2_valence', 'Ro', 'B', 'ref_id']].values[0]\n for n in range(1, 7):\n entry = {'Atom1': cation,\n 'Atom1_valence': n,\n 'Atom2': anion,\n 'Atom2_valence': an_val,\n 'Ro': Ro,\n 'B': b,\n 'ref_id': ref_id,\n 'details': 'Interpolated'\n }\n data.append(entry)\n new_data = pd.DataFrame(data)\n new_params = self.params.append(new_data, sort=True, ignore_index=True)\n return new_params\n\n def get_bv_params(self, cation, anion, cat_val, an_val):\n \"\"\"Lookup bond valence parameters from IUPAC table.\n Args:\n cation (Element): cation element\n anion (Element): anion element\n cat_val (Integer): cation formal oxidation state\n an_val (Integer): anion formal oxidation state\n Returns:\n bond_val_list: dataframe of bond valence parameters\n \"\"\"\n\n bv_data = self.params\n bond_val_list = self.params.loc[(bv_data['Atom1'] == str(cation)) \\\n & (bv_data['Atom1_valence'] == cat_val) \\\n & (bv_data['Atom2'] == str(anion)) \\\n & (bv_data['Atom2_valence'] == an_val)]\n return bond_val_list.iloc[0] # If multiple values exist, take first one\n # as recommended for reliability.\n\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
icheft/MGT2001_package
[ "8be8f76bef803bc58eab46a788851598ec5a6e1d" ]
[ "mgt2001/team.py" ]
[ "from matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport scipy.stats as stats\nimport statsmodels.api as sm\nimport statsmodels.stats.api as sms\nimport statsmodels.formula.api as smf\nimport statsmodels.stats.multicomp as smm\nimport statsmodels.stats.outliers_influence as sso\nimport statsmodels\nimport statistics\nimport math\nimport time\nimport itertools\nfrom scipy.optimize import curve_fit\nfrom statsmodels.tsa.ar_model import AutoReg, ar_select_order\nfrom statsmodels.tsa.api import acf, pacf, graphics\n\n##################### MICHAEL #####################\n\n\ndef Outlier_and_InfObs(standard_resid=None, x_data=None, y_data=None, Multi=True, df=None):\n outlier_index = []\n infobs_index = []\n # print ('Outliers :')\n for i in range(len(standard_resid)):\n if (standard_resid[i] < -2 or standard_resid[i] > 2):\n outlier_index.append(i)\n # print (i, standard_resid[i])\n\n print(\"\\n\")\n\n if (not Multi):\n cov_mat = np.cov(y_data, x_data)\n x_bar = x_data.mean()\n nobs = len(x_data)\n h_val = 1 / nobs + (x_data - x_bar) ** 2 / (nobs - 1) / cov_mat[1, 1]\n # print(h_val)\n df1 = pd.DataFrame(h_val, columns=['hi'])\n filter = (df1['hi'] > 6 / nobs)\n print(\"\\nInfluential Observations by hi :\")\n print(df1['hi'].loc[filter])\n else:\n H = np.matmul(x_data, np.linalg.solve(\n np.matmul(x_data.T, x_data), x_data.T))\n df_w_h = df.copy().reset_index().rename(columns={'index': 'ID'})\n df_w_h['ID'] += 1\n df_w_h['h_ii'] = np.diagonal(H)\n # print (x_data.shape[1])\n k = x_data.shape[1]-1\n n = len(df_w_h['h_ii'])\n h_level = 3 * (k+1) / n\n # print(\"h_level = \", h_level)\n # print(\" \\n\")\n for i in range(0, df_w_h.shape[0]):\n if df_w_h['h_ii'][i] > h_level:\n infobs_index.append(i)\n filter = (df_w_h['h_ii'] > h_level)\n # print(\"Influential Observations by hi = \\n\")\n # print(df_w_h['h_ii'].loc[filter])\n return outlier_index, infobs_index\n\n\n##################### DEREK #####################\n# Outliers DIY\n\n\ndef simple_outliers_DIY(df, xname, yname, alpha=0.05):\n # Fit regression model\n result = smf.ols(yname + '~' + xname, data=df).fit()\n\n # studentized residual\n st1, data1, ss3 = sso.summary_table(result, alpha=alpha)\n Residual = data1[:, 8]\n STD_Residual = data1[:, 10]\n mu = np.mean(STD_Residual)\n sigma = np.std(STD_Residual)\n\n print(\"◆ Outliers Finding\\n\")\n print(\"(remove by yourself!)\\n\")\n df_out = pd.DataFrame(STD_Residual, columns=['SD'])\n filter = (df_out['SD'] < -2) | (df_out['SD'] > 2)\n print(\"Outliers by SD = \")\n print(df_out['SD'].loc[filter])\n print(\"\\nActual ID: \", df_out['SD'].loc[filter].index+1)\n return df_out['SD'].loc[filter].index\n\n# compute p value from t statistics\n\n\ndef tpv(stat, dof, tail):\n if (tail == 'r'):\n return 1 - stats.t.cdf(stat, df=dof) # right\n elif (tail == 'l'):\n return stats.t.cdf(stat, df=dof) # left\n elif (tail == 'db'):\n if(stats.t.cdf(stat, df=dof) > 0.5):\n return 2 * (1 - stats.t.cdf(stat, df=dof)) # double\n else:\n return 2 * stats.t.cdf(stat, df=dof) # double\n else:\n return -1 # error\n\n# p value interpretation\n\n\ndef pvSig(pv):\n print(\"\\n====== p value significance ======\")\n if (pv <= 0.01):\n print(\">>> highly sig, overwhelming evidence\\n sig, strong evidence\\n not sig, weak evidence\\n not sig, little to no evidence\")\n elif (pv <= 0.05 and pv > 0.01):\n print(\" highly sig, overwhelming evidence\\n>>> sig, strong evidence\\n not sig, weak evidence\\n not sig, little to no evidence\")\n elif (pv <= 0.1 and pv > 
0.05):\n print(\" highly sig, overwhelming evidence\\n sig, strong evidence\\n>>> not sig, weak evidence\\n not sig, little to no evidence\")\n elif (pv > 0.1):\n print(\" highly sig, overwhelming evidence\\n sig, strong evidence\\n not sig, weak evidence\\n>>> not sig, little to no evidence\")\n else:\n print(\"BAD INPUT\")\n print(\"===================================\\n\")\n\n# r value interpretation\n\n\ndef rvInter(rv):\n print(\"\\n====== R value interpretation ======\")\n if (rv > 0):\n print(\" [positive]\")\n elif (rv < 0):\n print(\" [negative]\")\n else:\n print(\" [no linear rellation]\")\n return\n\n if (abs(rv) <= 0.25):\n print(\" very strong\\n moderately strong\\n moderately weak\\n>>> very weak\")\n elif (abs(rv) <= 0.5 and abs(rv) > 0.25):\n print(\" very strong\\n moderately strong\\n>>> moderately weak\\n very weak\")\n elif (abs(rv) <= 0.75 and abs(rv) > 0.5):\n print(\" very strong\\n>>> moderately strong\\n moderately weak\\n very weak\")\n elif (abs(rv) <= 1 and abs(rv) > 0.75):\n print(\">>> very strong\\n moderately strong\\n moderately weak\\n very weak\")\n else:\n print(\"BAD INPUT\")\n print(\"====================================\\n\")\n\n\ndef simple_regplot(df, xname, yname):\n _ = sns.regplot(x=xname, y=yname, data=df, color='b', ci=None)\n plt.title('Scatter Plot')\n plt.xlabel(xname)\n plt.ylabel(yname)\n plt.show()\n\n\ndef simple_regmod(df, xname, yname):\n # Fit regression model\n result1 = smf.ols(yname + '~ ' + xname, data=df).fit()\n # Inspect the results\n print(result1.summary())\n\n b1_1 = result1.params[1]\n b0_1 = result1.params[0]\n print(f\"Estimated model: y = {b0_1:.4f} + {b1_1:.4f} x\")\n\n\ndef simple_durbin_watson(df, xname, yname, alpha=0.05):\n print(\"\\n\\n========== Durbin-Watson ==========\\n\")\n\n y_data = df[yname]\n x_data_ar = []\n x_data_ar = np.asarray(df[xname])\n\n x_data_T = x_data_ar.T\n x_data = pd.DataFrame({xname: x_data_T})\n x_data2 = sm.add_constant(x_data)\n olsmod = sm.OLS(y_data, x_data2)\n result = olsmod.fit()\n\n st, data, ss2 = sso.summary_table(result, alpha=alpha)\n print(\"Columns in data are: %s\" % ss2)\n # Predicted value\n y_pre = data[:, 2]\n # Studentized Residual\n SD = data[:, 10]\n\n x_square_sum = np.vdot(SD, SD)\n print(\"x_square_sum = \", x_square_sum)\n size = SD.size\n print(\"size = \", size)\n x_d = np.zeros((size))\n print(\"x_d = \", x_d)\n l_size = size - 1\n for i in range(l_size):\n x_d[i + 1] = SD[i + 1] - SD[i]\n print(\"x_d = \", x_d)\n d = np.vdot(x_d, x_d) / x_square_sum\n print(\"d = \", d)\n\n\ndef chi2_normtest(stand_res, N, alpha=0.05):\n m = np.mean(stand_res)\n s = np.std(stand_res)\n prob_bins = np.zeros((N + 1))\n z_bins = np.zeros((N + 1))\n z_bins[0] = -4\n z_bins[N] = 4\n for i in range(0, N+1):\n prob_bins[i] = i/N\n for j in range(1, N):\n z_bins[j] = m + stats.norm.isf(1 - prob_bins[j]) * s\n counts, bins = np.histogram(stand_res, bins=z_bins)\n nobs = counts.sum()\n prob_e = np.zeros((N))\n for i in range(1, N+1):\n prob_e[i - 1] = prob_bins[i] - prob_bins[i - 1]\n freq_e = nobs * prob_e\n freq_o = counts\n if np.sum(freq_e < 5) > 0:\n print(\"Rule of five is not met.\")\n else:\n chi_stat, pval = stats.chisquare(freq_o, freq_e)\n chi_pval = stats.chi2.sf(chi_stat, N - 3)\n print(\"Chi-squared test: statistics = %0.4f, p-value = %0.4f\" %\n (chi_stat, chi_pval))\n df = freq_o.shape[0]-3\n crit_value = stats.chi2.ppf(1 - alpha, df)\n print(\"Critical value = %0.4f (defree of freedom = %d)\" % (crit_value, df))\n\n return chi_pval\n\n\ndef runsTest(l, l_median):\n 
runs, n1, n2 = 1, 0, 0\n if(l[0]) >= l_median:\n n1 += 1\n else:\n n2 += 1\n # Checking for start of new run\n for i in range(len(l)):\n # no. of runs\n if (l[i] >= l_median and l[i-1] < l_median) or (l[i] < l_median and l[i-1] >= l_median):\n runs += 1\n # print(i, runs)\n # no. of positive values\n if(l[i]) >= l_median:\n n1 += 1\n # no. of negative values\n else:\n n2 += 1\n runs_exp = ((2*n1*n2)/(n1+n2)) + 1\n stan_dev = math.sqrt((2*n1*n2*(2*n1*n2-n1-n2))/(((n1+n2)**2)*(n1+n2-1)))\n z = (runs-runs_exp)/stan_dev\n pval_z = stats.norm.sf(abs(z)) * 2\n print('runs = ', runs)\n print('n1 = ', n1)\n print('n2 = ', n2)\n print('runs_exp = ', runs_exp)\n print('stan_dev = ', stan_dev)\n print('z = ', z)\n print('pval_z = ', pval_z)\n return pval_z\n\n\ndef simple_residual(df, xname, yname, alpha=0.05, resd_all=False, nobins=6):\n print(\"\\n\\n----------------------------\\n|Residual Analysis - simple|\\n----------------------------\\n\")\n print(\"using alpha = \", alpha)\n print(\"\\n\\n ◆ Residuals\\n\")\n\n # Fit regression model\n result = smf.ols(yname + '~' + xname, data=df).fit()\n\n # studentized residual\n st1, data1, ss3 = sso.summary_table(result, alpha=alpha)\n Residual = data1[:, 8]\n STD_Residual = data1[:, 10]\n mu = np.mean(STD_Residual)\n sigma = np.std(STD_Residual)\n\n if(resd_all == True):\n print(\"Original Residuals: \\n\", Residual, \"\\n\")\n print(\"Standardized Residuals: \\n\", STD_Residual, \"\\n\")\n print(\"mu:\", mu)\n print(\"sigma:\", sigma)\n else:\n print(\"mu:\", mu)\n print(\"sigma:\", sigma)\n\n # Normality Test\n print(\"\\n\\n ◆ Error Normality Test\\n\")\n print(\"H0: Errors are normally distributed.\")\n print(\"H1: Errors are not normally distributed.\")\n\n # Histogram\n print(\"\\n\\n ◇ Histogram\\n\")\n counts, bins, patches = plt.hist(\n STD_Residual, nobins, density=False, facecolor='black', alpha=0.75)\n\n plt.xlabel('Standardized Residuals')\n plt.ylabel('Frequency')\n plt.title('Histogram of Standardized Residuals')\n plt.grid(True)\n bin_centers = [np.mean(k) for k in zip(bins[:-1], bins[1:])]\n plt.show()\n\n print(counts)\n print(bins)\n\n # Shapiro Test\n print(\"\\n\\n ◇ Shapiro Test\\n\")\n stat, spv = stats.shapiro(STD_Residual)\n print(f\"Statistics = {stat:.4f}, p-value = {spv:.4f}\")\n pvSig(spv)\n\n # Chi^2 Test\n print(\"\\n\\n ◇ Chi-squared Test\\n\")\n stand_res = STD_Residual\n N = nobins\n\n m = np.mean(stand_res)\n s = np.std(stand_res)\n prob_bins = np.zeros((N + 1))\n z_bins = np.zeros((N + 1))\n z_bins[0] = -4\n z_bins[N] = 4\n for i in range(0, N+1):\n prob_bins[i] = i/N\n for j in range(1, N):\n z_bins[j] = m + stats.norm.isf(1 - prob_bins[j]) * s\n counts, bins = np.histogram(stand_res, bins=z_bins)\n nobs = counts.sum()\n prob_e = np.zeros((N))\n for i in range(1, N+1):\n prob_e[i - 1] = prob_bins[i] - prob_bins[i - 1]\n freq_e = nobs * prob_e\n freq_o = counts\n if np.sum(freq_e < 5) > 0:\n print(\"Rule of five is not met.\")\n else:\n chi_stat, pval = stats.chisquare(freq_o, freq_e)\n chi_pval = stats.chi2.sf(chi_stat, N - 3)\n print(\"Chi-squared test: statistics = %0.4f, p-value = %0.4f\" %\n (chi_stat, chi_pval))\n df_fq = freq_o.shape[0]-3\n crit_value = stats.chi2.ppf(1 - alpha, df_fq)\n print(\"Critical value = %0.4f (defree of freedom = %d)\" %\n (crit_value, df_fq))\n\n # pvSig(chi_pval)\n\n # Homoscedasticity and Heteroscedasticity\n print(\"\\n\\n ◆ Homoscedasticity and Heteroscedasticity\\n\")\n print(\"H_0: Randomness exists\")\n print(\"H_1: Randomness doesn't exist\")\n Id1 = data1[:, 0]\n 
plt.plot(Id1, STD_Residual, 'o', color='gray')\n plt.axhline(y=0, color='blue')\n plt.axhline(y=2, color='red')\n plt.axhline(y=-2, color='red')\n plt.title('Standardized Residual Plot')\n plt.xlabel('Observation No.')\n plt.ylabel('Standardized Residual')\n plt.show()\n\n # Dependence of the Error Variable\n print(\"\\n\\n ◆ Dependence of the Error Variable (Run Test)\\n\")\n print(\"H_0: Sample is random\")\n print(\"H_1: Sample is not random\")\n SD_median = statistics.median(STD_Residual)\n Z_pval = runsTest(STD_Residual, SD_median)\n print('p-value for run test z-statistic= ', Z_pval)\n pvSig(Z_pval)\n\n # Outliers\n print(\"\\n\\n ◆ Outliers Finding\\n\")\n print(\"(remove by yourself!)\\n\")\n df_out = pd.DataFrame(STD_Residual, columns=['SD'])\n filter = (df_out['SD'] < -2) | (df_out['SD'] > 2)\n print(\"Outliers by SD = \")\n print(df_out['SD'].loc[filter])\n print(\"\\nActual ID: \", df_out['SD'].loc[filter].index+1)\n\n # Influential Observations\n print(\"\\n\\n ◆ Influential observations Finding\\n\")\n x_data = df[xname].values\n y_data = df[yname].values\n cov_mat1 = np.cov(y_data, x_data)\n x_data_bar = x_data.mean()\n data_nobs = len(x_data)\n h_val = 1 / data_nobs + (x_data - x_data_bar) ** 2 / \\\n (data_nobs - 1) / cov_mat1[1, 1]\n # print(h_val)\n df_hi = pd.DataFrame(h_val, columns=['hi'])\n filter = (df_hi['hi'] > nobins / data_nobs)\n print(\"Influential Observations by hi = \", df_hi['hi'].loc[filter])\n print(\"\\nAutal ID: \", df_hi['hi'].loc[filter].index+1)\n\n\ndef simple_modass(df, xname, yname, alpha=0.05, tail='db'):\n # Fit regression model\n result1 = smf.ols(yname + '~ ' + xname, data=df).fit()\n\n b1_1 = result1.params[1]\n b0_1 = result1.params[0]\n print(f\"Estimated model: y = {b0_1:.4f} + {b1_1:.4f} x\")\n\n print(\"\\n\\n---------------------------\\n| Model Assessing |\\n---------------------------\\n\")\n print(\"using alpha = \", alpha)\n\n print(\"\\n\\n ◆ Standard Error of Estimate\\n\")\n s2_e = result1.mse_resid\n print(f\"MSE = {s2_e:f}\")\n s_e = result1.mse_resid ** 0.5\n print(f\"Standard errors = {s_e:f}\")\n y_bar = df[yname].mean()\n print(f\"y mean = {y_bar:.4f}\")\n print(\n f\"The absolute value of standard errors is about {abs(s_e/y_bar)*100:.0f}% of mean of independent variables.\\n\")\n\n print(\"\\n\\n ◆ Coefficient of Determination\\n\")\n R2 = result1.rsquared\n print(f\"R^2 = {R2:f}\")\n R = np.sign(b1_1) * R2 ** 0.5\n print(f\"R = {R:f}\")\n\n print(\n f\"\\nR^2 value interpretation\\nAbout {R2*100:.0f}% of the variation in the dependent variables is explained by independent ones, the rest remains unexplained.\")\n rvInter(R)\n\n print(\"\\n\\n ◆ Studetn-t test for beta1(slope)\\n\")\n dof = len(df) - 2\n tv = R * ((dof - 2)/(1 - R ** 2)) ** 0.5\n LCL = stats.t.ppf(alpha / 2, dof - 2)\n UCL = stats.t.ppf(1 - alpha / 2, dof - 2)\n print('t = ', tv)\n print('t_LCL = ', LCL)\n print('t_UCL = ', UCL)\n\n print(f\"\\np-value of t-stat tail: {tail}\")\n tp = tpv(tv, dof, tail)\n print(\"p-value of t test = \", tp)\n pvSig(tp)\n\n print(\"\\n\\n ◆ Coefficient of Correlation\\n\")\n cor_mat = np.corrcoef(df[[xname, yname]].values, rowvar=False)\n n = df.shape[0]\n r = cor_mat[1, 0]\n tv_cc = r * ((n-2)/(1 - r**2)) ** 0.5\n t_critical = stats.t.ppf(0.975, n - 2)\n pval = stats.t.sf(np.abs(tv_cc), n - 2)*2\n\n print('r = ', r)\n print('t_critical = ', t_critical)\n print('t = ', tv_cc)\n print('p_value = ', pval)\n\n\ndef simple_CIPIPRE(x, y, x1, alpha=0.05):\n print(\"\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n|CI PI for simple 
regression|\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\n print(\"using alpha = \", alpha)\n\n x_new = np.array([1, x1])\n print(\"make Confidence Interval and Prediction Interval predictions at mean of x = \", x1)\n x2 = sm.add_constant(x)\n olsmod = sm.OLS(y, x2)\n result_reg = olsmod.fit()\n y_head = np.dot(result_reg.params, x_new)\n print(\"y_head = \", y_head)\n (t_minus, t_plus) = stats.t.interval(\n alpha=(1.0 - alpha), df=result_reg.df_resid)\n cov_mat1 = np.cov(y, x)\n x_bar = x.mean()\n core1 = (1 / result_reg.nobs + (x1 - x_bar) ** 2 /\n (result_reg.nobs - 1) / cov_mat1[1, 1]) ** 0.5\n core2 = (1 + 1 / result_reg.nobs + (x1 - x_bar) ** 2 /\n (result_reg.nobs - 1) / cov_mat1[1, 1]) ** 0.5\n lower_bound = y_head + t_minus * (result_reg.mse_resid ** 0.5) * core1\n upper_bound = y_head + t_plus * (result_reg.mse_resid ** 0.5) * core1\n half_interval = t_plus * (result_reg.mse_resid ** 0.5) * core1\n lower_bound2 = y_head + t_minus * (result_reg.mse_resid ** 0.5) * core2\n upper_bound2 = y_head + t_plus * (result_reg.mse_resid ** 0.5) * core2\n half_interval2 = t_plus * (result_reg.mse_resid ** 0.5) * core2\n\n print(\n f\"\\n{100*(1-alpha):.0f}% confidence interval for mean: [{lower_bound:.4f}, {upper_bound:.4f}], or {y_head:.4f} +- {half_interval:.4f}\")\n print(\n f\"\\n{100*(1-alpha):.0f}% prediction interval: [{lower_bound2:.4f}, {upper_bound2:.4f}], or {y_head:.4f} +- {half_interval2:.4f}\")\n\n\ndef simple_CIPIINT_regplot(df, xname, yname, alpha=0.05):\n print(\"\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n|CI PI Interval plot - simple|\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\n print(\"using alpha = \", alpha)\n\n df_sorted = df.sort_values([xname])\n result = smf.ols(yname + '~' + xname, data=df_sorted).fit()\n x = df_sorted[xname].values\n y = df_sorted[yname].values\n st, data, ss2 = sso.summary_table(result, alpha=alpha)\n fittedvalues = data[:, 2]\n predict_mean_se = data[:, 3]\n predict_mean_ci_low, predict_mean_ci_upp = data[:, 4:6].T\n predict_ci_low, predict_ci_upp = data[:, 6:8].T\n\n plt.plot(x, y, 'o', color='gray')\n plt.plot(x, fittedvalues, '-', lw=0.5)\n plt.plot(x, predict_mean_ci_low, 'r-', lw=0.4)\n plt.plot(x, predict_mean_ci_upp, 'r-', lw=0.4)\n plt.plot(x, predict_ci_low, 'b--', lw=0.4)\n plt.plot(x, predict_ci_upp, 'b--', lw=0.4)\n plt.title('CI PI plot')\n plt.xlabel(xname)\n plt.ylabel(yname)\n plt.legend(['data points', 'regression model', 'confidence interval',\n 'prediction interval'], title='Legends', bbox_to_anchor=(1.3, 1), prop={'size': 6})\n plt.show()\n\n\ndef simple(step, df, xname, yname, alpha=0.05, tail='db', nobins=6, resd_all=False):\n if step == 1:\n simple_regplot(df, xname, yname)\n elif step == 2:\n print(\"\\npropose a statistical model\\n\")\n elif step == 3:\n simple_regmod(df, xname, yname)\n elif step == 4:\n print(\"\\nfor autocorrelation and others, please determine by yourself!\\n\")\n simple_durbin_watson(df, xname, yname, alpha=alpha)\n elif step == 5:\n print(\"\\nremember to remove outliers or do some modifications.\\n\")\n simple_residual(df, xname, yname, alpha=alpha,\n resd_all=resd_all, nobins=nobins)\n elif step == 6:\n simple_modass(df, xname, yname, alpha=alpha, tail=tail)\n elif step == 7:\n print(\"\\ninterpretation\\n\")\n elif step == 8:\n print(\n \"\\nsimple_CIPIPRE(df[xname], df[yname], x_input...) 
won't run here\\n\")\n simple_CIPIINT_regplot(df, xname, yname, alpha=alpha)\n else:\n print(\"\\nbad input for step!\\n\")\n\n\ndef multiple_regplot(df, xnames, yname):\n for aname in xnames:\n x_var = df[aname].values\n _ = sns.regplot(x=x_var, y=df[yname].values,\n data=df, color='b', ci=None)\n plt.xlabel(aname)\n plt.ylabel(yname)\n plt.show()\n\n df_ = df[[yname] + xnames]\n corr1 = df_.corr()\n corr1\n _ = sns.heatmap(corr1, annot=True)\n\n\ndef multiple_modpropose(xnames, yname):\n print(\"\\n\\n---------- Model Proposal ----------\\n\")\n print(\"Model proposal,<br>\")\n\n mod = \"$y = \\\\beta_0 + \"\n for i in range(len(xnames)):\n coe = \"\\\\beta_\" + str(i+1) + \"x_\" + str(i+1) + \" + \"\n mod = mod + coe\n mod = mod + \"\\\\epsilon$<br>\"\n print(mod)\n\n print(\"where y is \", yname, \"<br>\")\n\n exp = \"and \"\n for j in range(len(xnames)):\n xexp = \"$x_\" + str(j+1) + \"$ is \" + xnames[j] + \", \"\n exp = exp + xexp\n\n print(exp)\n\n\ndef multiple_regmod(df, xnames, yname, alpha=0.05):\n y_data = df[yname]\n x_data_ar = []\n for i in range(len(xnames)):\n x_data_ar.append(df[xnames[i]])\n x_data_ar = np.asarray(x_data_ar)\n\n x_data_T = x_data_ar.T\n x_data = pd.DataFrame(x_data_T, columns=xnames)\n x_data2 = sm.add_constant(x_data)\n olsmod = sm.OLS(y_data, x_data2)\n result = olsmod.fit()\n print(f\"\\n\\nusing alpha = {alpha:.2f}\")\n print(result.summary())\n\n print(\"\\nEstimated model: y = %0.4f\" % (result.params[0]), end=' ')\n for c, x in zip(result.params[1:], list(range(1, len(xnames)+1))):\n print('+', end='') if c > 0 else print('-', end='')\n print(\" %0.4f x%d \" % (abs(c), x), end='')\n\n\ndef multiple_durbin_watson(df, xnames, yname, alpha=0.05):\n print(\"\\n\\n========== Durbin-Watson ==========\\n\")\n\n y_data = df[yname]\n x_data_ar = []\n for i in range(len(xnames)):\n x_data_ar.append(df[xnames[i]])\n x_data_ar = np.asarray(x_data_ar)\n\n x_data_T = x_data_ar.T\n x_data = pd.DataFrame(x_data_T, columns=xnames)\n x_data2 = sm.add_constant(x_data)\n olsmod = sm.OLS(y_data, x_data2)\n result = olsmod.fit()\n\n st, data, ss2 = sso.summary_table(result, alpha=alpha)\n print(\"Columns in data are: %s\" % ss2)\n # Predicted value\n y_pre = data[:, 2]\n # Studentized Residual\n SD = data[:, 10]\n\n x_square_sum = np.vdot(SD, SD)\n print(\"x_square_sum = \", x_square_sum)\n size = SD.size\n print(\"size = \", size)\n x_d = np.zeros((size))\n print(\"x_d = \", x_d)\n l_size = size - 1\n for i in range(l_size):\n x_d[i + 1] = SD[i + 1] - SD[i]\n print(\"x_d = \", x_d)\n d = np.vdot(x_d, x_d) / x_square_sum\n print(\"d = \", d)\n\n\ndef multiple_residual(df, xnames, yname, alpha=0.05, nobins=6):\n print(\"\\n\\n----------------------------\\n|Residual Analysis - multiple|\\n----------------------------\\n\")\n print(\"using alpha = \", alpha)\n print(\"\\n\\n ◆ Residuals\\n\")\n\n y_data = df[yname]\n x_data_ar = []\n for i in range(len(xnames)):\n x_data_ar.append(df[xnames[i]])\n x_data_ar = np.asarray(x_data_ar)\n\n x_data_T = x_data_ar.T\n x_data = pd.DataFrame(x_data_T, columns=xnames)\n x_data2 = sm.add_constant(x_data)\n olsmod = sm.OLS(y_data, x_data2)\n result = olsmod.fit()\n\n st, data, ss2 = sso.summary_table(result, alpha=alpha)\n print(\"Columns in data are: %s\" % ss2)\n # Predicted value\n y_pre = data[:, 2]\n # Studentized Residual\n SD = data[:, 10]\n\n mu = np.mean(SD)\n sigma = np.std(SD)\n\n # Normality Test\n print(\"\\n\\n ◆ Error Normality Test\\n\")\n print(\"H0: Errors are normally distributed.\")\n print(\"H1: Errors are not 
normally distributed.\")\n\n # Histogram\n print(\"\\n\\n ◇ Histogram\\n\")\n fig, ax = plt.subplots()\n counts, bins, patches = plt.hist(\n SD, nobins, density=False, facecolor='g', alpha=0.75)\n plt.xlabel('Standardized Residuals')\n plt.ylabel('Frequency')\n plt.title('Histogram of Standardized Residuals_Car Prices')\n plt.grid(True)\n bin_centers = [np.mean(k) for k in zip(bins[:-1], bins[1:])]\n plt.show()\n\n print(counts)\n print(bins)\n\n # qqplot\n print(\"\\n\\n ◇ QQ-plot\\n\")\n fig = sm.qqplot(SD, stats.norm, fit=True, line='45')\n plt.show()\n print()\n\n # Shapiro Test\n print(\"\\n\\n ◇ Shapiro Test\\n\")\n stat, spv = stats.shapiro(SD)\n print(f\"Statistics = {stat:.4f}, p-value = {spv:.4f}\")\n pvSig(spv)\n\n # Chi^2 Test\n print(\"\\n\\n ◇ Chi-squared Test\\n\")\n stand_res = SD\n N = nobins - 1\n\n m = np.mean(stand_res)\n s = np.std(stand_res)\n prob_bins = np.zeros((N + 1))\n z_bins = np.zeros((N + 1))\n z_bins[0] = -4\n z_bins[N] = 4\n for i in range(0, N+1):\n prob_bins[i] = i/N\n for j in range(1, N):\n z_bins[j] = m + stats.norm.isf(1 - prob_bins[j]) * s\n counts, bins = np.histogram(stand_res, bins=z_bins)\n nobs = counts.sum()\n prob_e = np.zeros((N))\n for i in range(1, N+1):\n prob_e[i - 1] = prob_bins[i] - prob_bins[i - 1]\n freq_e = nobs * prob_e\n freq_o = counts\n if np.sum(freq_e < 5) > 0:\n print(\"Rule of five is not met.\")\n else:\n chi_stat, pval = stats.chisquare(freq_o, freq_e)\n chi_pval = stats.chi2.sf(chi_stat, N - 3)\n print(\"Chi-squared test: statistics = %0.4f, p-value = %0.4f\" %\n (chi_stat, chi_pval))\n df_fq = freq_o.shape[0]-3\n crit_value = stats.chi2.ppf(1 - alpha, df_fq)\n print(\"Critical value = %0.4f (defree of freedom = %d)\" %\n (crit_value, df_fq))\n\n # pvSig(chi_pval)\n\n # Homoscedasticity and Heteroscedasticity\n print(\"\\n\\n ◆ Homoscedasticity and Heteroscedasticity\\n\")\n print(\"H_0:Randomness exists\")\n print(\"H_0:Randomness doesn't exist\")\n\n st, data, ss2 = sso.summary_table(result, alpha=alpha)\n print(\"\\nColumns in data are: %s\" % ss2)\n # Predicted value\n y_pre = data[:, 2]\n # Studentized Residual\n SD = data[:, 10]\n\n plt.plot(y_pre, SD, 'o', color='gray')\n plt.axhline(y=2, color='red', lw=0.8)\n plt.axhline(y=0, color='blue')\n plt.axhline(y=-2, color='red', lw=0.8)\n plt.title('Standardized Residual Plot')\n plt.xlabel('Predicted y value')\n plt.ylabel('Standardized Residual')\n plt.show()\n\n # autocorrelation\n # Dependence of the Error Variable\n print(\"\\n\\n ◆ Dependence of the Error Variable (Run Test)\\n\")\n print(\"H_0: Sample is random\")\n print(\"H_1: Sample is not random\")\n\n print(\"\\nColumns in data are: %s\" % ss2)\n\n Id1 = data[:, 0]\n plt.plot(Id1, SD, 'o', color='gray')\n plt.axhline(y=0, color='blue')\n plt.axhline(y=2, color='red')\n plt.axhline(y=-2, color='red')\n plt.title('Standardized Residual Plot')\n plt.xlabel('Observation No.')\n plt.ylabel('Standardized Residual')\n plt.show()\n\n SD_median = statistics.median(SD)\n Z_pval = runsTest(SD, SD_median)\n print('p_value for Z-statistic= ', Z_pval)\n\n pvSig(Z_pval)\n\n # Outliers\n print(\"\\n\\n ◆ Outliers Finding\\n\")\n df_out = pd.DataFrame(SD, columns=['SD'])\n filter = (df_out['SD'] < -2) | (df_out['SD'] > 2)\n print(\"Outliers by SD = \")\n print(df_out['SD'].loc[filter])\n print(\"\\nActual ID: \", df_out['SD'].loc[filter].index+1)\n\n # Influential Observations by hii\n print(\"\\n\\n ◆ Influential observations Finding by hii\\n\")\n x_data2 = np.array(x_data2)\n H = np.matmul(x_data2, np.linalg.solve(\n 
np.matmul(x_data2.T, x_data2), x_data2.T))\n df['hii'] = np.diagonal(H)\n df_1h = pd.DataFrame(df['hii'])\n k = result.df_model\n n = len(df_1h['hii'])\n h_level = 3 * (k+1) / n\n print(\"h_level = \", h_level)\n filter = (df_1h['hii'] > h_level)\n print(\"\\nInfluential Observations by hi =\\n\")\n print(df_1h['hii'].loc[filter])\n\n # Influential Observations by Cook's Distance\n print(\"\\n\\n ◆ Influential observations Finding by Cook's Distance\\n\")\n s2_e = result.mse_resid\n k = result.df_model\n y_a = data[:, 1]\n y_f = data[:, 2]\n h_i = df['hii']\n CD_arr = np.square(y_a - y_f) / s2_e / (k - 1) * h_i / np.square(1 - h_i)\n CD = np.array(CD_arr)\n df_cd = pd.DataFrame(CD, columns=['CD'])\n print(df_cd.head())\n filter = (df_cd['CD'] > 1)\n print(\"Influential Observations by Cook's Distances =\\n\")\n print(df_cd['CD'].loc[filter])\n\n\ndef multiple_modass(df, xnames, yname, alpha=0.05):\n y_data = df[yname]\n x_data_ar = []\n for i in range(len(xnames)):\n x_data_ar.append(df[xnames[i]])\n x_data_ar = np.asarray(x_data_ar)\n\n x_data_T = x_data_ar.T\n x_data = pd.DataFrame(x_data_T, columns=xnames)\n x_data2 = sm.add_constant(x_data)\n olsmod = sm.OLS(y_data, x_data2)\n result = olsmod.fit()\n\n print(\"\\n\\n---------------------------\\n| Model Assessing |\\n---------------------------\\n\")\n print(\"using alpha = \", alpha)\n\n print(\"\\n\\n ◆ Standard Error of Estimate\\n\")\n s2_e = result.mse_resid\n print(f\"MSE = {s2_e:f}\")\n s_e = result.mse_resid ** 0.5\n print(\"Standard error = \", s_e)\n y_bar = df[yname].mean()\n print(\"y mean = \", y_bar)\n print(\"y STD = \", df[yname].std())\n print(\n f\"The absolute value of standard errors is about {abs(s_e/y_bar)*100:.0f}% of mean of independent variables.\\n\")\n\n R2 = result.rsquared\n print(\"\\nCoefficient of Determination\")\n print(\"R^2 = \", result.rsquared)\n print(\"Adjusted R^2 = \", result.rsquared_adj)\n\n print(\n f\"\\nR^2 value interpretation\\nAbout {R2*100:.0f}% of the variation in the dependent variables is explained by the model, the rest remains unexplained.\")\n rvInter(R2**0.5)\n\n print(\"\\n\\n ◆ Over-fitting?\\n\")\n diffrra = abs(result.rsquared - result.rsquared_adj)\n print(\"|R^2 - Ra^2| = \", diffrra)\n if(diffrra > 0.06):\n print(\"|R^2 - Ra^2| >= 0.06 indicating that the model has the problem of over-fitting.\")\n else:\n print(\"|R^2 - Ra^2| < 0.06 indicating that the model doesn't have the problem of over-fitting.\")\n\n print(\"\\n\\n ◆ F-test of ANOVA\\n\")\n print(\"Testing hypothesis,\")\n print(\"H_0: \\beta_1 = \\beta_2 = \\dots = \\beta_n = 0<br>\")\n print(\"H_1: \\text{at least one } \\beta_i \\neq 0\")\n\n f_res = result.fvalue\n MSE = result.mse_resid\n df_model = result.df_model\n df_error = result.df_resid\n MSR = f_res * MSE\n SSR = MSR * df_model\n print(\"SSR = \", SSR, \"\\tdf = \", df_model, \"\\tMSR = \", MSR)\n print(\"SSE = \", MSE * df_error, \"\\tdf = \", df_error, \"\\tMSE = \", MSE)\n print(\"F = MSR / MSE = \", MSR / MSE)\n fpv = result.f_pvalue\n print(\"F p-value = \", fpv)\n\n pvSig(fpv)\n\n\ndef multiple_CIPIPRE_(xdata, yval, x1, alpha=0.05):\n print(\"\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n|CI PI for simple regression|\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\n print(\"using alpha = \", alpha)\n\n print(\"To make Confidence Interval and Prediction Interval prediction at mean of x = \", x1)\n x_data_T = xdata.T\n x_data2 = sm.add_constant(x_data_T)\n olsmod = sm.OLS(yval, x_data2)\n result_reg = olsmod.fit()\n y_head = np.dot(result_reg.params, x1)\n 
print(\"y_head = \", y_head)\n (t_minus, t_plus) = stats.t.interval(\n alpha=(1.0 - alpha), df=result_reg.df_resid)\n core1 = (result_reg.mse_resid * np.matmul(x1,\n np.linalg.solve(np.matmul(x_data2.T, x_data2), x1))) ** 0.5\n lower_bound = y_head + t_minus * core1\n upper_bound = y_head + t_plus * core1\n core2 = (result_reg.mse_resid * (1 + np.matmul(x1,\n np.linalg.solve(np.matmul(x_data2.T, x_data2), x1)))) ** 0.5\n lower_bound2 = y_head + t_minus * core2\n upper_bound2 = y_head + t_plus * core2\n\n print(\n f\"\\n{100*(1-alpha):.0f}% confidence interval for mean: [{lower_bound:.4f}, {upper_bound:.4f}]\")\n print(\n f\"\\n{100*(1-alpha):.0f}% prediction interval: [{lower_bound2:.4f}, {upper_bound2:.4f}]\")\n\n\ndef multiple_CIPIPRE(df, xnames, yname, xx, alpha=0.05):\n x0 = [1]\n x1 = x0 + xx\n\n yval = df[yname]\n xdata_ar = []\n\n for i in range(len(xnames)):\n xdata_ar.append(df[xnames[i]])\n xdata_ar = np.asarray(xdata_ar)\n\n x1_ = np.array(x1)\n\n multiple_CIPIPRE_(xdata_ar, yval, x1_, alpha)\n\n\ndef multiple(step, df, xnames, yname, alpha=0.05, tail='db', nobins=6):\n if step == 1:\n multiple_regplot(df, xnames, yname)\n elif step == 2:\n multiple_modpropose(xnames, yname)\n elif step == 3:\n multiple_regmod(df, xnames, yname)\n elif step == 4:\n print(\"\\nfor autocorrelation and others, please determine by yourself!\\n\")\n multiple_durbin_watson(df, xnames, yname, alpha=alpha)\n elif step == 5:\n print(\"\\nremember to remove outliers or do some modifications.\\n\")\n multiple_residual(df, xnames, yname, alpha=alpha, nobins=nobins)\n elif step == 6:\n multiple_modass(df, xnames, yname, alpha=alpha)\n elif step == 7:\n print(\"\\ninterpretation\\n\")\n elif step == 8:\n print(\"\\multiple_CIPIPRE (df, xnames, yname, xx...) won't run here\\n\")\n else:\n print(\"\\nbad input for step!\\n\")\n\n\ndef time_add(df, name='Time'):\n time = []\n for i in range(df.shape[0]):\n time.append(i)\n print(time)\n\n df[name] = time\n return df\n\n\ndef value_map_ln(df, target):\n lnv = []\n for i in range(df.shape[0]):\n lnv.append(math.log((df[target].values[i])))\n\n newname = \"ln_\" + target\n df[newname] = lnv\n return df\n\n\ndef outliers_rm(df, out):\n df = df.drop(df.index[out])\n df = df.reset_index()\n return df\n" ]
[ [ "numpy.dot", "numpy.mean", "numpy.sign", "scipy.stats.t.ppf", "numpy.histogram", "scipy.stats.chi2.sf", "pandas.DataFrame", "matplotlib.pyplot.subplots", "scipy.stats.chisquare", "scipy.stats.chi2.ppf", "scipy.stats.norm.isf", "numpy.vdot", "numpy.square", "numpy.array", "numpy.matmul", "numpy.zeros", "matplotlib.pyplot.axhline", "matplotlib.pyplot.title", "numpy.std", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "numpy.corrcoef", "scipy.stats.t.cdf", "numpy.cov", "numpy.asarray", "scipy.stats.t.interval", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.sum", "numpy.diagonal", "matplotlib.pyplot.ylabel", "numpy.abs", "scipy.stats.shapiro" ] ]
achaar/autokeras
[ "7e10593b6ac88497150710a59c6807cc04d9f810" ]
[ "tests/utils.py" ]
[ "import kerastuner\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nimport autokeras as ak\n\nSEED = 5\nCOLUMN_NAMES_FROM_NUMPY = [\n 'bool_',\n 'num_to_cat_',\n 'float_',\n 'int_',\n 'morethan_32_',\n 'col1_morethan_100_',\n 'col2_morethan_100_',\n 'col3_morethan_100_']\nCOLUMN_TYPES_FROM_NUMPY = {\n 'bool_': 'categorical',\n 'num_to_cat_': 'categorical',\n 'float_': 'numerical',\n 'int_': 'numerical',\n 'morethan_32_': 'categorical',\n 'col1_morethan_100_': 'categorical',\n 'col2_morethan_100_': 'categorical',\n 'col3_morethan_100_': 'categorical'}\nCOLUMN_NAMES_FROM_CSV = [\n 'sex',\n 'age',\n 'n_siblings_spouses',\n 'parch',\n 'fare',\n 'class',\n 'deck',\n 'embark_town',\n 'alone']\nLESS_COLUMN_NAMES_FROM_CSV = [\n 'age',\n 'n_siblings_spouses',\n 'parch',\n 'fare',\n 'class',\n 'deck',\n 'embark_town',\n 'alone']\nCOLUMN_TYPES_FROM_CSV = {\n 'sex': 'categorical',\n 'age': 'numerical',\n 'n_siblings_spouses': 'categorical',\n 'parch': 'categorical',\n 'fare': 'numerical',\n 'class': 'categorical',\n 'deck': 'categorical',\n 'embark_town': 'categorical',\n 'alone': 'categorical'}\nFALSE_COLUMN_TYPES_FROM_CSV = {\n 'sex': 'cat',\n 'age': 'num',\n 'n_siblings_spouses': 'cat',\n 'parch': 'categorical',\n 'fare': 'numerical',\n 'class': 'categorical',\n 'deck': 'categorical',\n 'embark_town': 'categorical',\n 'alone': 'categorical'}\nPARTIAL_COLUMN_TYPES_FROM_CSV = {\n 'fare': 'categorical',\n 'class': 'categorical',\n 'deck': 'categorical',\n 'embark_town': 'categorical',\n 'alone': 'categorical'}\nTRAIN_FILE_PATH = r'tests/fixtures/titanic/train.csv'\nTEST_FILE_PATH = r'tests/fixtures/titanic/eval.csv'\n\n\ndef generate_structured_data(num_instances=500, dtype='np'):\n # generate high_level dataset\n num_feature = 8\n num_nan = 100\n # 12 classes\n career = ['doctor', 'nurse', 'driver', 'chef', 'teacher', 'writer',\n 'actress', 'engineer', 'lawyer', 'realtor', 'agent', 'pilot']\n # 15 classes\n states = ['CA', 'FL', 'GA', 'IL', 'MD',\n 'MA', 'MI', 'MN', 'NJ', 'NY',\n 'NC', 'PA', 'TX', 'UT', 'VA']\n # 13 classes\n years = ['first', 'second', 'third', 'fourth', 'fifth',\n 'sixth', 'seventh', 'eighth', 'ninth', 'tenth',\n 'eleventh', 'twelfth', 'thirteenth']\n # 10 classes\n color = ['red', 'orange', 'yellow', 'green', 'blue',\n 'purple', 'beige', 'pink', 'silver', 'gold']\n # 3 classes\n size = ['S', 'M', 'L']\n boolean = ['True', 'False']\n career_states = [] # 180 classes\n career_years = [] # 156 classes\n career_color = [] # 120 classes\n career_size = [] # 36 classes\n for c in career:\n for s in states:\n career_states.append(c+'_'+s)\n for y in years:\n career_years.append(c+'_'+y)\n for r in color:\n career_color.append(c+'_'+r)\n for g in size:\n career_size.append(c+'_'+g)\n\n np.random.seed(0)\n col_bool = np.random.choice(boolean, num_instances).reshape(num_instances, 1)\n col_num_to_cat = np.random.randint(\n 20, 41, size=num_instances).reshape(num_instances, 1)\n col_float = 100*np.random.random(num_instances,).reshape(num_instances, 1)\n col_int = np.random.randint(\n 2000, 4000, size=num_instances).reshape(num_instances, 1)\n col_morethan_32 = np.random.choice(\n career_size, num_instances).reshape(num_instances, 1)\n col1_morethan_100 = np.random.choice(career_states,\n num_instances).reshape(num_instances, 1)\n col2_morethan_100 = np.random.choice(career_years,\n num_instances).reshape(num_instances, 1)\n col3_morethan_100 = np.random.choice(career_color,\n num_instances).reshape(num_instances, 1)\n data = np.concatenate((col_bool, col_num_to_cat, 
col_float, col_int,\n col_morethan_32, col1_morethan_100, col2_morethan_100,\n col3_morethan_100), axis=1)\n # generate np.nan data\n for i in range(num_nan):\n row = np.random.randint(0, num_instances)\n col = np.random.randint(0, num_feature)\n data[row][col] = np.nan\n if dtype == 'np':\n return data\n if dtype == 'dataset':\n return tf.data.Dataset.from_tensor_slices(data)\n\n\ndef dataframe_numpy():\n x = pd.read_csv(TRAIN_FILE_PATH)\n y = x.pop('survived').to_numpy()\n val_x = pd.read_csv(TEST_FILE_PATH)\n val_y = val_x.pop('survived').to_numpy()\n return (x, y), (val_x, val_y)\n\n\ndef dataframe_dataframe():\n x = pd.read_csv(TRAIN_FILE_PATH)\n y = pd.DataFrame(x.pop('survived'))\n val_x = pd.read_csv(TEST_FILE_PATH)\n val_y = pd.DataFrame(val_x.pop('survived'))\n return (x, y), (val_x, val_y)\n\n\ndef dataframe_series():\n x = pd.read_csv(TRAIN_FILE_PATH)\n y = x.pop('survived')\n val_x = pd.read_csv(TEST_FILE_PATH)\n val_y = val_x.pop('survived')\n return (x, y), (val_x, val_y)\n\n\ndef csv_test(target):\n x_test = pd.read_csv(TEST_FILE_PATH)\n if target == 'regression':\n x_test = x_test.drop('fare', axis=1)\n else:\n x_test = x_test.drop('survived', axis=1)\n return x_test\n\n\ndef generate_data(num_instances=100, shape=(32, 32, 3), dtype='np'):\n np.random.seed(SEED)\n data = np.random.rand(*((num_instances,) + shape))\n if data.dtype == np.float64:\n data = data.astype(np.float32)\n if dtype == 'np':\n return data\n if dtype == 'dataset':\n return tf.data.Dataset.from_tensor_slices(data)\n\n\ndef generate_one_hot_labels(num_instances=100, num_classes=10, dtype='np'):\n np.random.seed(SEED)\n labels = np.random.randint(num_classes, size=num_instances)\n data = tf.keras.utils.to_categorical(labels)\n if dtype == 'np':\n return data\n if dtype == 'dataset':\n return tf.data.Dataset.from_tensor_slices(data)\n\n\ndef fit_predict_with_graph(inputs, outputs, x, y):\n model = ak.graph.HyperBuiltGraphHyperModel(\n inputs, outputs).build(kerastuner.HyperParameters())\n model.fit(x, y,\n epochs=1,\n batch_size=100,\n verbose=False,\n validation_split=0.2)\n return model.predict(x)\n\n\ndef do_nothing(*args, **kwargs):\n pass\n\n\ndef imdb_raw(num_instances=100):\n index_offset = 3 # word index offset\n\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(\n num_words=1000,\n index_from=index_offset)\n x_train = x_train[:num_instances]\n y_train = y_train[:num_instances].reshape(-1, 1)\n x_test = x_test[:num_instances]\n y_test = y_test[:num_instances].reshape(-1, 1)\n\n word_to_id = tf.keras.datasets.imdb.get_word_index()\n word_to_id = {k: (v + index_offset) for k, v in word_to_id.items()}\n word_to_id[\"<PAD>\"] = 0\n word_to_id[\"<START>\"] = 1\n word_to_id[\"<UNK>\"] = 2\n\n id_to_word = {value: key for key, value in word_to_id.items()}\n x_train = list(map(lambda sentence: ' '.join(\n id_to_word[i] for i in sentence), x_train))\n x_test = list(map(lambda sentence: ' '.join(\n id_to_word[i] for i in sentence), x_test))\n x_train = np.array(x_train, dtype=np.str)\n x_test = np.array(x_test, dtype=np.str)\n return (x_train, y_train), (x_test, y_test)\n\n\ndef build_graph():\n tf.keras.backend.clear_session()\n image_input = ak.ImageInput(shape=(32, 32, 3))\n merged_outputs = ak.ImageBlock()(image_input)\n head = ak.ClassificationHead(num_classes=10)\n head.output_shape = (10,)\n classification_outputs = head(merged_outputs)\n return ak.graph.Graph(\n inputs=image_input,\n outputs=classification_outputs)\n" ]
[ [ "numpy.concatenate", "tensorflow.keras.utils.to_categorical", "tensorflow.keras.datasets.imdb.load_data", "numpy.array", "numpy.random.rand", "tensorflow.data.Dataset.from_tensor_slices", "numpy.random.choice", "numpy.random.seed", "numpy.random.randint", "tensorflow.keras.backend.clear_session", "tensorflow.keras.datasets.imdb.get_word_index", "pandas.read_csv", "numpy.random.random" ] ]
syeehyn/spug
[ "216976e0171bbc14042377fbbb535180bd2efaf3" ]
[ "spug/data_pipeline/extraction/twitter.py" ]
[ "\"\"\"Matrix Constructor for Twitter\n\"\"\"\nfrom tqdm import tqdm\nimport pandas as pd\nfrom .base import MatrixConstructor\n\n\nclass TwitterMatrixConstructor(MatrixConstructor):\n \"\"\"Twitter Matrix Constructor\"\"\"\n\n def __init__(self, **configs):\n super().__init__(**configs)\n\n def get_matrix(self, df, interval):\n if interval == \"month\":\n df = df.assign(\n interval=df.date.apply(lambda x: f\"{x.year}_{str(x.month).zfill(2)}\")\n )\n elif interval == \"quarter\":\n df = df.assign(interval=df.date.apply(lambda x: f\"{x.year}_q{x.quarter}\"))\n else:\n raise NotImplementedError\n\n time_intervals = sorted(df.interval.unique())\n companies = [list(com.keys())[0] for com in self.companies]\n alias = list(map(lambda x: list(x.items())[0][1][\"alias\"], self.companies))\n res = {\n interval: pd.DataFrame(0, index=companies, columns=companies)\n for interval in time_intervals\n }\n for T in tqdm(time_intervals):\n sub_df = df[df.interval == T]\n for company1, search_items1 in zip(companies, alias):\n for company2, search_items2 in zip(companies, alias):\n if company1 != company2:\n search_items = search_items1 + search_items2\n else:\n search_items = search_items1\n pat = \"|\".join(search_items)\n res[T][company1][company2] += sub_df.text.str.contains(pat).sum()\n to_return = {}\n for T, mat in res.items():\n to_return[T] = mat.values\n return to_return\n" ]
[ [ "pandas.DataFrame" ] ]
savagewil/SavageML
[ "d5aa9a5305b5de088e3bf32778252c877faec41d" ]
[ "savageml/utility/activation_functions.py" ]
[ "from typing import Union\nimport numpy as np\n\n\ndef sigmoid_der(y: Union[int, float, np.array]) -> Union[int, float, np.array]:\n return np.multiply(np.subtract(1.0, y), y)\n\n\ndef tanh_derivative(y: Union[int, float, np.array]) -> Union[int, float, np.array]:\n return np.subtract(1.0, np.square(y))\n\n\ndef sigmoid(x: Union[int, float, np.array]) -> Union[int, float, np.array]:\n return np.divide(1.0, np.add(1.0, np.exp(np.negative(x))))\n\n\ndef relu(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:\n if isinstance(x, np.ndarray):\n y = x.copy()\n y[x < 0] = 0.0\n return y\n elif isinstance(x, float):\n return x if x > 0.0 else 0.0\n else:\n return x if x > 0 else 0\n\n\ndef relu_derivative(y: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:\n if isinstance(y, np.ndarray):\n dx = y.copy()\n dx[y > 0] = 1.0\n return y\n elif isinstance(y, float):\n return 1.0 if y > 0.0 else 0.0\n else:\n return 1 if y > 0 else 0\n\n\nclass ActivationFunctions:\n TANH = np.tanh\n SIGMOID = sigmoid\n RELU = relu\n\n\nclass ActivationFunctionsDerivatives:\n TANH_DERIVATIVE = tanh_derivative\n SIGMOID_DERIVATIVE = sigmoid_der\n RELU_DERIVATIVE = relu_derivative\n\n" ]
[ [ "numpy.square", "numpy.negative", "numpy.subtract" ] ]
PhilippaHartley/sim-mid-pointing
[ "0f11d37e6fac231d7f20e4a7e20ee76e7d2d560f" ]
[ "beam_models/EMSS/with_elevation/SKADCBeamPatterns/2019_08_06_SKA_Ku/interpolated/interpolate_beam_Ku.py" ]
[ "import logging\nimport sys\n\nimport numpy\n\nfrom processing_library.image.operations import create_empty_image_like\nfrom rascil.processing_components.image.operations import export_image_to_fits, import_image_from_fits\n\nimport matplotlib.pyplot as plt\n\nlog = logging.getLogger()\nlog.setLevel(logging.INFO)\nlog.addHandler(logging.StreamHandler(sys.stdout))\nmpl_logger = logging.getLogger(\"matplotlib\")\nmpl_logger.setLevel(logging.WARNING)\n\nimport pprint\n\npp = pprint.PrettyPrinter()\n\nfrom scipy import interpolate\n\n# x = np.arange(0, 10)\n# y = np.exp(-x/3.0)\n# f = interpolate.interp1d(x, y)\n#\n# xnew = np.arange(0,9, 0.1)\n# ynew = f(xnew) # use interpolation function returned by `interp1d`\n# plt.plot(x, y, 'o', xnew, ynew, '-')\n# plt.show()\n\nelevations_in = numpy.array([15, 45, 90], dtype='float')\nelevations_out = numpy.array([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90], dtype='float')\nelevations_out = numpy.arange(15.0, 90, 1.0)\ndefault = 1\nnchan = 1\nnpol = 4\nny = 1024\nnx = 1024\n\narray_in = numpy.zeros([nchan, npol, ny, ny, len(elevations_in)])\narray_out = numpy.zeros([nchan, npol, ny, ny, len(elevations_out)])\n\nim_in = \"../Ku_{el:d}_11700_{type}.fits\"\nim_out = \"Ku_{el:d}_11700_{type}_interpolated.fits\"\nim_diff_out = \"Ku_{el:d}_11700_{type}_interpolated_difference.fits\"\n\nim_template = None\n\nfor type in ['real', 'imag']:\n for iel, el in enumerate(elevations_in):\n print(\"Reading elevation %s part elevation %.0f\" % (type, el))\n im_in_file = im_in.format(el=int(el), type=type)\n im = import_image_from_fits(im_in_file)\n array_in[..., iel] = im.data\n if im_template is None:\n im_template = create_empty_image_like(im)\n \n f = interpolate.interp1d(elevations_in, array_in, axis=4, kind='quadratic')\n array_out = f(elevations_out)\n\n rms_vp = []\n max_vp = []\n min_vp = []\n rms_diff = []\n max_diff = []\n min_diff = []\n\n\n for iel, el in enumerate(elevations_out):\n print(\"Writing elevation %s part %.0f\" % (type, el))\n im_template.data = array_out[..., iel]\n im_out_file = im_out.format(el=int(el), type=type)\n export_image_to_fits(im_template, im_out_file)\n rms_vp.append(numpy.std(im_template.data[0,0:1,...]))\n max_vp.append(numpy.max(im_template.data[0,0:1,...]))\n min_vp.append(numpy.min(im_template.data[0,0:1,...]))\n im_template.data -= array_in[..., default]\n im_diff_out_file = im_diff_out.format(el=int(el), type=type)\n export_image_to_fits(im_template, im_diff_out_file)\n rms_diff.append(numpy.std(im_template.data[0,0:1,...]))\n max_diff.append(numpy.max(im_template.data[0,0:1,...]))\n min_diff.append(numpy.min(im_template.data[0,0:1,...]))\n\n plt.clf()\n plt.plot(elevations_out, rms_vp, '-', color='r', label='VP rms')\n if type == 'imag':\n plt.plot(elevations_out, max_vp, '.', color='g', label='VP max')\n plt.plot(elevations_out, min_vp, '-', color='b', label='VP min')\n plt.plot(elevations_out, rms_diff, '.', color='r', label='VP diff rms')\n plt.plot(elevations_out, max_diff, '.', color='g', label='VP diff max')\n plt.plot(elevations_out, min_diff, '.', color='b', label='VP diff min')\n plt.xlabel('Elevation')\n plt.ylabel('Value')\n plt.title('Statistics in %s part of 11700MHz voltage pattern' % type)\n plt.legend()\n plt.savefig('%s_vp_statistics.png' % type)\n plt.show(block=False)" ]
[ [ "numpy.max", "numpy.array", "scipy.interpolate.interp1d", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "numpy.min", "numpy.std", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf" ] ]
mdai/mdai-client-py
[ "59595a023e73571f36e1c7951325476d73ef3823" ]
[ "mdai/utils/transforms.py" ]
[ "import os\nimport cv2\nimport numpy as np\nimport dicom2nifti\nimport pydicom\n\nDEFAULT_IMAGE_SIZE = (512.0, 512.0)\n\n\ndef sort_dicoms(dicoms):\n \"\"\"\n Sort the dicoms based on the image position patient.\n Find most significant axis to use during sorting. The original way of sorting\n (first x than y than z) does not work in certain border situations where for\n exampe the X will only slightly change causing the values to remain equal on\n multiple slices messing up the sorting completely.\n\n @dicoms: list of dicoms\n \"\"\"\n\n dicom_input_sorted_x = sorted(dicoms, key=lambda x: (x.ImagePositionPatient[0]))\n dicom_input_sorted_y = sorted(dicoms, key=lambda x: (x.ImagePositionPatient[1]))\n dicom_input_sorted_z = sorted(dicoms, key=lambda x: (x.ImagePositionPatient[2]))\n diff_x = abs(\n dicom_input_sorted_x[-1].ImagePositionPatient[0]\n - dicom_input_sorted_x[0].ImagePositionPatient[0]\n )\n diff_y = abs(\n dicom_input_sorted_y[-1].ImagePositionPatient[1]\n - dicom_input_sorted_y[0].ImagePositionPatient[1]\n )\n diff_z = abs(\n dicom_input_sorted_z[-1].ImagePositionPatient[2]\n - dicom_input_sorted_z[0].ImagePositionPatient[2]\n )\n if diff_x >= diff_y and diff_x >= diff_z:\n return dicom_input_sorted_x\n if diff_y >= diff_x and diff_y >= diff_z:\n return dicom_input_sorted_y\n if diff_z >= diff_x and diff_z >= diff_y:\n return dicom_input_sorted_z\n\n\ndef apply_slope_intercept(dicom_file):\n \"\"\"\n Applies rescale slope and rescale intercept transformation.\n \"\"\"\n array = dicom_file.pixel_array.copy()\n\n scale_slope = 1\n scale_intercept = 0\n if \"RescaleIntercept\" in dicom_file:\n scale_intercept = int(dicom_file.RescaleIntercept)\n if \"RescaleSlope\" in dicom_file:\n scale_slope = int(dicom_file.RescaleSlope)\n array = array * scale_slope\n array = array + scale_intercept\n return array\n\n\ndef remove_padding(array):\n \"\"\"\n Removes background/padding from an 8bit numpy array.\n \"\"\"\n arr = array.copy()\n nonzeros = np.nonzero(arr)\n x1 = np.min(nonzeros[0])\n x2 = np.max(nonzeros[0])\n y1 = np.min(nonzeros[1])\n y2 = np.max(nonzeros[1])\n return arr[x1:x2, y1:y2]\n\n\ndef get_window_from_dicom(dicom_file):\n \"\"\"\n Returns window width and window center values.\n If no window width/level is provided or available, returns None.\n \"\"\"\n width, level = None, None\n if \"WindowWidth\" in dicom_file:\n width = dicom_file.WindowWidth\n if isinstance(width, pydicom.multival.MultiValue):\n width = int(width[0])\n else:\n width = int(width)\n\n if \"WindowCenter\" in dicom_file:\n level = dicom_file.WindowCenter\n if isinstance(level, pydicom.multival.MultiValue):\n level = int(level[0])\n else:\n level = int(level)\n return width, level\n\n\ndef window(array, width, level):\n \"\"\"\n Applies windowing operation.\n If window width/level is None, returns the array itself.\n \"\"\"\n if width is not None and level is not None:\n array = np.clip(array, level - width // 2, level + width // 2)\n return array\n\n\ndef rescale_to_8bit(array):\n \"\"\"\n Convert an array to 8bit (0-255).\n \"\"\"\n array = array - np.min(array)\n array = array / np.max(array)\n array = (array * 255).astype(\"uint8\")\n return array\n\n\ndef load_dicom_array(dicom_file, apply_slope_intercept=True):\n \"\"\"\n Returns the dicom image as a Numpy array.\n \"\"\"\n array = dicom_file.pixel_array.copy()\n if apply_slope_intercept:\n array = apply_slope_intercept(dicom_file)\n return array\n\n\ndef convert_dicom_to_nifti(dicom_files, tempdir):\n \"\"\"\n Converts a dicom series to nifti 
format.\n Saves nifti in directory provided with filename as SeriesInstanceUID.nii.gz\n Returns a sorted list of dicom files based on image position patient.\n \"\"\"\n output_file = os.path.join(tempdir, dicom_files[0].SeriesInstanceUID + \".nii.gz\")\n nifti_file = dicom2nifti.convert_dicom.dicom_array_to_nifti(\n dicom_files, output_file=output_file, reorient_nifti=True,\n )\n return sort_dicoms(dicom_files)\n\n\ndef convert_dicom_to_8bit(dicom_file, imsize=None, width=None, level=None, keep_padding=True):\n \"\"\"\n Given a DICOM file, window specifications, and image size,\n return the image as a Numpy array scaled to [0,255] of the specified size.\n \"\"\"\n if width is None or level is None:\n width, level = get_window_from_dicom(dicom_file)\n\n array = apply_slope_intercept(dicom_file)\n array = window(array, width, level)\n array = rescale_to_8bit(array)\n\n if (\n \"PhotometricInterpretation\" in dicom_file\n and dicom_file.PhotometricInterpretation == \"MONOCHROME1\"\n ):\n array = 255 - array\n\n if not keep_padding:\n array = remove_padding(array)\n\n if imsize is not None:\n array = cv2.resize(array, imsize)\n return array\n\n\ndef convert_to_RGB(array, imsize=None):\n \"\"\"\n Converts a single channel monochrome image to a 3 channel RGB image.\n \"\"\"\n img = np.stack((array,) * 3, axis=-1)\n if imsize is not None:\n img = cv2.resize(img, imsize)\n return img\n\n\ndef convert_to_RGB_window(array, width, level, imsize=None):\n \"\"\"\n Converts a monochrome image to 3 channel RGB with windowing.\n Width and level can be lists for different values per channel.\n \"\"\"\n if type(width) is list and type(level) is list:\n R = window(array, width[0], level[0])\n G = window(array, width[1], level[1])\n B = window(array, width[2], level[2])\n img = np.stack([R, G, B], axis=-1)\n else:\n R = window(array, width, level)\n img = np.stack((R,) * 3, axis=-1)\n\n if imsize is not None:\n img = cv2.resize(img, imsize)\n return img\n\n\ndef stack_slices(dicom_files):\n \"\"\"\n Stacks the +-1 slice to each slice in a dicom series.\n Returns the list of stacked images and sorted list of dicom files.\n \"\"\"\n dicom_files = sort_dicoms(dicom_files)\n dicom_images = [load_dicom_array(i) for i in dicom_files]\n\n stacked_images = []\n for i, file in enumerate(dicom_images):\n if i == 0:\n img = np.stack([dicom_images[i], dicom_images[i], dicom_images[i + 1]], axis=-1)\n stacked_images.append(img)\n elif i == len(dicom_files) - 1:\n img = np.stack([dicom_images[i - 1], dicom_images[i], dicom_images[i]], axis=-1)\n stacked_images.append(img)\n else:\n img = np.stack([dicom_images[i - 1], dicom_images[i], dicom_images[i + 1]], axis=-1)\n stacked_images.append(img)\n\n return stacked_images, dicom_files\n" ]
[ [ "numpy.max", "numpy.min", "numpy.nonzero", "numpy.stack", "numpy.clip" ] ]
jiaojiening/pytorch-CycleGAN
[ "ab83fe4638f32cb560b8cd1117e8307153b8b5a1" ]
[ "models/bigan_model.py" ]
[ "import torch\nimport itertools\nfrom util.image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom . import networks\n\n\nclass BiGANModel(BaseModel):\n def name(self):\n return 'BiGANModel'\n\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n # default GAN did not use dropout\n parser.set_defaults(no_dropout=True)\n if is_train:\n parser.add_argument('--lambda_rec', type=float, default=10.0, help='weight for reconstruction loss')\n # parser.add_argument('--lambda_rec', type=float, default=20.0, help='weight for reconstruction loss')\n\n return parser\n\n def initialize(self, opt):\n BaseModel.initialize(self, opt)\n\n # specify the training losses you want to print out. The program will call base_model.get_current_losses\n self.loss_names = ['D_A', 'G_A', 'rec']\n # specify the images you want to save/display. The program will call base_model.get_current_visuals\n visual_names_A = ['real_HR_A', 'real_LR_B', 'fake_LR_A']\n\n self.visual_names = visual_names_A\n # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks\n if self.isTrain:\n self.model_names = ['G_A', 'D_A']\n else: # during test time, only load Gs\n self.model_names = ['G_A']\n\n # load/define networks\n # netG_A: HR -> LR\n self.netG_A = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain:\n use_sigmoid = opt.no_lsgan\n self.netD_A = networks.define_D(opt.input_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain:\n self.fake_LR_A_pool = ImagePool(opt.pool_size)\n # define loss functions\n self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n self.criterionRec = torch.nn.L1Loss()\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(itertools.chain( self.netD_A.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers = []\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n def set_input(self, input):\n self.real_HR_A = input['A'].to(self.device)\n # self.real_LR_B = input['B'].to(self.device)\n self.image_paths = input['A_paths']\n\n # load the ground-truth high resolution B image to test the SR quality\n # self.real_HR_B = input['GT_B'].to(self.device)\n # load the ground-truth low resolution A image\n self.real_LR_A = input['GT_A'].to(self.device)\n\n def forward(self):\n # LR -> HR\n # self.fake_HR_B = self.netG_B(self.real_LR_B)\n # self.fake_HR_A = self.netG_B(self.real_LR_A)\n\n # HR -> LR\n self.fake_LR_A = self.netG_A(self.real_HR_A)\n\n def backward_D_basic(self, netD, real, fake):\n # Real\n pred_real = netD(real)\n loss_D_real = self.criterionGAN(pred_real, True)\n # Fake\n # fake.detach() the loss_D do not backward to the net_G\n pred_fake = netD(fake.detach())\n loss_D_fake = self.criterionGAN(pred_fake, False)\n # Combined loss\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n # backward\n loss_D.backward()\n return loss_D\n\n def backward_D_A(self):\n fake_LR_A = self.fake_LR_A_pool.query(self.fake_LR_A)\n self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_LR_A, fake_LR_A)\n\n def backward_G(self):\n lambda_rec = 
self.opt.lambda_rec\n\n        # GAN loss D_A(G_A(A))\n        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_LR_A), True)\n        # combined loss\n        self.loss_G = self.loss_G_A\n\n        # reconstruction loss of the generated low-resolution fake_LR_A\n        self.loss_rec = self.criterionRec(self.fake_LR_A, self.real_LR_A) * lambda_rec\n        self.loss_G += self.loss_rec\n\n        self.loss_G.backward()\n\n    def optimize_parameters(self):\n        # forward\n        self.forward()\n        # G_A\n        self.set_requires_grad([self.netD_A], False)\n        self.optimizer_G.zero_grad()\n        self.backward_G()\n        self.optimizer_G.step()\n        # D_A\n        self.set_requires_grad([self.netD_A], True)\n        self.optimizer_D.zero_grad()\n        self.backward_D_A()\n        self.optimizer_D.step()\n" ]
[ [ "torch.nn.L1Loss" ] ]
Valzavator/YouTubeTrendingVideosAnalysis
[ "4baca01a351a20bec04331936cd9f6eafaea815d" ]
[ "cli/analyze_data_from.py" ]
[ "import os\n#import subprocess\nimport time\nimport pycountry\nimport gc\nimport pandas as pd\nfrom pandas import DataFrame\n\nfrom cli.form import Form\nfrom database.database import Database\nimport processing_tool.data_analysis as da\nfrom util.args import Args\n\n\nclass AnalyzeDataForm(Form):\n\n def __init__(self, parent: Form, database: Database, county_codes: set):\n self.__parent = parent\n self.__db = database\n self.__country_codes = county_codes\n\n def launch(self):\n loop = True\n while loop:\n try:\n self.__print__menu()\n choice = input(\">>> Enter your choice [0-3]: \")\n if choice == '1':\n self.__detailed_analysis_for_each_country_separately()\n\n elif choice == '2':\n if len(self.__country_codes) > 1:\n self.__detailed_analysis_for_all_countries()\n elif len(self.__country_codes) == 1:\n self.__detailed_analysis_for_each_country_separately()\n\n elif choice == '0':\n loop = False\n\n else:\n print(\">>> Wrong option selection!\")\n\n if loop:\n input(\">>> Press Enter to continue...\")\n\n except MemoryError:\n print(\">>> RAM overflow!\")\n input(\">>> Try again...\")\n\n gc.collect()\n\n def __detailed_analysis_for_each_country_separately(self):\n os.system('cls')\n print('Please, wait...')\n\n is_analyze = False\n\n for code in self.__country_codes:\n\n print(f'COUNTRY: {pycountry.countries.get(alpha_2=code).name}')\n\n data = self.__db.get_videos_by_country_code(code)\n data_frame = pd.DataFrame(data)\n\n del data\n\n if data_frame.size == 0:\n print(f'No data for analysis {code}!')\n continue\n\n is_analyze = True\n\n output_directory = os.path.join(\n Args.analysis_res_dir(),\n f'{code}{os.sep}{time.strftime(\"%d.%m.%y\")}{os.sep}')\n\n print('>>> General analysis is carried out')\n self.__general_analysis_for_data(data_frame, output_directory)\n print('>>> General report is completed!')\n\n print('>>> Detailed analysis is carried out')\n self.__detailed_analysis_for_data(data_frame, output_directory)\n print('>>> Detailed analysis is completed!')\n\n del data_frame\n\n if is_analyze:\n os.startfile(Args.analysis_res_dir())\n # subprocess.Popen(f'explorer /select, {Args.analysis_res_dir()}{os.sep}')\n\n def __detailed_analysis_for_all_countries(self):\n os.system('cls')\n print('Please, wait...')\n\n data = self.__db.get_videos_by_country_codes(list(self.__country_codes))\n data_frame = pd.DataFrame(data)\n\n del data\n\n if data_frame.size > 0:\n output_directory = os.path.join(\n Args.analysis_res_dir(),\n f'all_country{os.sep}{time.strftime(\"%d.%m.%y\")}{os.sep}')\n\n print('>>> General analysis is carried out')\n self.__general_analysis_for_data(data_frame, output_directory)\n print('>>> General report is completed!')\n\n print('>>> Detailed analysis is carried out')\n self.__detailed_analysis_for_data(data_frame, output_directory)\n print('>>> Detailed analysis is completed!')\n\n # subprocess.Popen(f'explorer /select, {output_directory}')\n os.startfile(output_directory)\n else:\n print('No data for analysis!')\n\n del data_frame\n\n def __general_analysis_for_data(self, data_frame: DataFrame, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n file_path = os.path.join(output_dir, 'general_analysis.txt')\n\n with open(file_path, \"w+\",\n encoding='utf-8') as file:\n ###\n\n file.write(f\"{20 * '-'} {str.upper('General information')} {20 * '-'}\\n\\n\")\n data_frame.info(buf=file)\n\n ###\n\n file.write(self.__create_paragraph(\n 'General view of the four numeric attributes',\n data_frame[['view_count', 'likes', 'dislikes', 
'comment_count']].describe()))\n\n ###\n\n data_frame = da.distribution_of_days_preprocessing(data_frame)\n\n file.write(self.__create_paragraph(\n 'Distribution of days that videos take to become popular',\n data_frame.interval.describe()))\n\n ###\n\n file.write(self.__create_paragraph(\n 'Top videos whose << view_count >> grow fastest among categories',\n da.view_count_fastest_grow_among_categories(data_frame, preprocessing=False).to_string()))\n\n ###\n\n file.write(self.__create_paragraph(\n 'Top videos whose << likes >> grow fastest among categories',\n da.likes_fastest_grow_among_categories(data_frame, preprocessing=False).to_string()))\n\n ###\n\n file.write(self.__create_paragraph(\n 'Top videos whose << dislikes >> grow fastest among categories',\n da.dislikes_fastest_grow_among_categories(data_frame, preprocessing=False).to_string()))\n\n ###\n\n file.write(self.__create_paragraph(\n 'Top videos whose << comment_count >> grow fastest among categories',\n da.comment_count_fastest_grow_among_categories(data_frame, preprocessing=False).to_string()))\n\n file.write(self.__create_paragraph(\n 'Top channels',\n da.top_channels(data_frame, 100).to_string()))\n\n file.close()\n\n gc.collect()\n\n @staticmethod\n def __detailed_analysis_for_data(data_frame: DataFrame, output_dir):\n\n analysis_funcs = [\n da.views_likes_dislikes_comments_normal_distribution,\n da.correlation,\n da.category_rating,\n da.distribution_boxplot,\n da.distribution_plot,\n da.distribution_of_days_histogram,\n da.distribution_of_average_time,\n da.word_cloud_for_tags,\n da.word_cloud_for_titles,\n da.word_cloud_for_description,\n # da.sentiment_analysis\n ]\n\n i = 0\n for funcs in analysis_funcs:\n funcs(data_frame, output_dir)\n i += 1\n print(f'... [{int(i*100/len(analysis_funcs))} %]')\n gc.collect()\n\n def __print__menu(self):\n os.system('cls')\n\n print('\\n', 25 * '-', 'DATA ANALYSIS MENU', 25 * '-', '\\n')\n print('>>> Your country codes: ', list(self.__country_codes), '\\n')\n print('1. Detailed analysis for each country separately')\n print('2. Detailed analysis for all countries together')\n print('0. Back')\n print('\\n', 70 * '-', '\\n')\n\n @staticmethod\n def __create_paragraph(title: str, text):\n return f\"\\n {20 * '-'} {str.upper(title)} {20 * '-'}\\n\\n{text}\\n\"\n\n @property\n def country_codes(self) -> set:\n return self.__country_codes\n\n @country_codes.setter\n def country_codes(self, value: set):\n self.__country_codes = value\n" ]
[ [ "pandas.DataFrame" ] ]
KaiserKlayton/lpa_cnn
[ "93d7b7b31d458b9ca002612df0882aa039b5885a" ]
[ "extract_caffe_weights.py" ]
[ "#!/usr/bin/env python\n\n__author__ = \"C. Clayton Violand\"\n__copyright__ = \"Copyright 2017\"\n\n## Extracts Caffe weights from Caffe models. Writes to file at: 'weights/<model_name>/..'.\n## REQUIRED: Caffe .prototxt and .caffemodel in: 'models/<model_name>/..'\n##\n\nimport os\nimport re\nimport sys\n\nimport numpy as np\nimport caffe\nimport cPickle\n\nfrom helper.extract_architecture import extract_architecture\n\ncaffe.set_mode_cpu()\n\ndef main():\n dirs = [x[0] for x in os.walk('models/')]\n\n for d in dirs:\n model_match = re.search(\"models/(.+)\", d)\n if model_match:\n model = model_match.group(1)\n else:\n continue\n\n if os.path.exists(\"weights/%s\" % model_match.group(1)):\n continue\n\n # Get .prototxt and .caffemodel path.\n for f in os.listdir(d):\n if f.endswith('.prototxt'):\n prototxt_file_path = os.path.join(d, f)\n if f.endswith('.caffemodel'):\n model_file_path = os.path.join(d, f)\n try:\n prototxt_file_path\n except:\n sys.exit(\"Error: No suitable Caffe .prototxt found...\")\n try:\n model_file_path\n except:\n sys.exit(\"Error: No suitable .caffemodel file found...\")\n\n # Extract architecture and parameters.\n architecture = extract_architecture(prototxt_file_path)\n a = architecture\n\n # Define caffe net.\n net = caffe.Net(prototxt_file_path, model_file_path, caffe.TEST)\n\n # Extract and write weights for each relevant layer.\n for key in a:\n if key == \"shape\" or a[key]['type'] == \"relu\" or a[key]['type'] == \"pooling\" or a[key]['type'] == \"eltwise\":\n continue\n\n if not os.path.exists(os.path.join('weights', model)):\n os.makedirs(os.path.join('weights', model))\n\n if a[key]['type'] == \"batchnorm\":\n mean_blob = net.params[key][0].data[...]\n var_blob = net.params[key][1].data[...]\n\n np.savetxt(os.path.join('weights', model, key+\"_mean.csv\"), mean_blob, delimiter=',')\n np.savetxt(os.path.join('weights', model, key+\"_var.csv\"), var_blob, delimiter=',')\n\n continue\n\n weight_blob = net.params[key][0].data[...]\n\n if len(weight_blob.shape) == 4:\n weight_blob = weight_blob.reshape(weight_blob.shape[0], weight_blob.shape[1]*weight_blob.shape[2]*weight_blob.shape[3])\n elif len(weight_blob.shape) == 3:\n weight_blob = weight_blob.reshape(weight_blob.shape[0], weight_blob.shape[1]*weight_blob.shape[2])\n else:\n pass\n\n np.savetxt(os.path.join('weights', model, key+\"_weights.csv\"), weight_blob, delimiter=',')\n\n if \"bias_term\" in a[key].keys():\n if a[key]['bias_term'] == \"false\":\n bias_blob = np.zeros(weight_blob.shape[0])\n else:\n bias_blob = net.params[key][1].data[...]\n else:\n bias_blob = net.params[key][1].data[...]\n\n np.savetxt(os.path.join('weights', model, key+\"_biases.csv\"), bias_blob, delimiter=',')\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.zeros" ] ]
deep-learning/facenet
[ "e74cf7c2a29477ed76cd34e243f993090c6f6987" ]
[ "src/align/align_dataset_mtcnn.py" ]
[ "\"\"\"Performs face alignment and stores face thumbnails in the output directory.\"\"\"\n# MIT License\n# \n# Copyright (c) 2016 David Sandberg\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport multiprocessing\nimport os\nimport random\nfrom multiprocessing import Process, Queue\n\nimport numpy as np\nimport sys\nimport tensorflow as tf\nfrom scipy import misc\nfrom time import sleep, time\n\nimport align.detect_face\nimport facenet\n\n\ndef split_chunks(l, n=2):\n return np.array_split(l, n)\n\n\ndef main(args):\n sleep(random.random())\n output_dir = os.path.expanduser(args.output_dir)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Store some git revision info in a text file in the log directory\n src_path, _ = os.path.split(os.path.realpath(__file__))\n facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))\n dataset = facenet.get_dataset(args.input_dir)\n\n if args.parallelism:\n n = args.parallelism\n else:\n n = multiprocessing.cpu_count()\n\n print(\"spawn {} parallel tasks to process {} subjects\".format(n, len(dataset)))\n\n # split the data set\n jobs = []\n queue = Queue()\n print(len(dataset))\n n_parts = list(split_chunks(dataset, n))\n for i in range(n):\n print(len(n_parts[i]))\n p = Process(target=process, args=(args, i, n_parts[i], output_dir, queue))\n p.start()\n jobs.append(p)\n\n for p in jobs:\n p.join()\n\n mx = []\n while not queue.empty():\n mx.append(queue.get())\n\n\ndef process(args, seq_num, dataset, output_dir, queue):\n pprint = lambda x: print(\"task {}->{}\".format(seq_num, x))\n start = time()\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=args.gpu_memory_fraction\n )\n sess = tf.Session(\n config=tf.ConfigProto(\n gpu_options=gpu_options,\n log_device_placement=False\n )\n )\n with sess.as_default():\n pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)\n minsize = args.min_face_size # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n # Add a random key to the filename to allow alignment using multiple processes\n bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % seq_num)\n with open(bounding_boxes_filename, \"w\") as text_file:\n nrof_images_total = 0\n nrof_successfully_aligned = 0\n # if args.random_order:\n # 
random.shuffle(dataset)\n for ix, cls in enumerate(dataset):\n output_class_dir = os.path.join(output_dir, cls.name)\n if ix % 100 == 0:\n pprint(\"processed {}/{}\".format(ix, len(dataset)))\n if not os.path.exists(output_class_dir):\n os.makedirs(output_class_dir)\n # if args.random_order:\n # random.shuffle(cls.image_paths)\n for image_path in cls.image_paths:\n nrof_images_total += 1\n filename = os.path.splitext(os.path.split(image_path)[1])[0]\n output_filename = os.path.join(output_class_dir, filename + '.png')\n # pprint(image_path)\n if not os.path.exists(output_filename):\n try:\n img = misc.imread(image_path)\n except (IOError, ValueError, IndexError) as e:\n pprint('{}: {}'.format(image_path, e))\n else:\n if img.ndim < 2:\n pprint('Unable to align \"%s\"' % image_path)\n text_file.write('%s\\n' % output_filename)\n continue\n if img.ndim == 2:\n img = facenet.to_rgb(img)\n img = img[:, :, 0:3]\n\n bounding_boxes, _ = align.detect_face.detect_face(img,\n minsize,\n pnet,\n rnet,\n onet,\n threshold,\n factor)\n nrof_faces = bounding_boxes.shape[0]\n if nrof_faces > 0:\n det = bounding_boxes[:, 0:4]\n det_arr = []\n img_size = np.asarray(img.shape)[0:2]\n if nrof_faces > 1:\n if args.warn_multiple_faces:\n pprint('WARN: {} has {} faces'.format(image_path, nrof_faces))\n\n if args.skip_multiple_faces:\n pprint('skipping {}'.format(image_path))\n continue\n\n if args.detect_multiple_faces:\n for i in range(nrof_faces):\n det_arr.append(np.squeeze(det[i]))\n else:\n bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])\n img_center = img_size / 2\n offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],\n (det[:, 1] + det[:, 3]) / 2 - img_center[0]])\n offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)\n # some extra weight on the centering\n index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)\n det_arr.append(det[index, :])\n else:\n det_arr.append(np.squeeze(det))\n\n for i, det in enumerate(det_arr):\n det = np.squeeze(det)\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0] - args.margin / 2, 0)\n bb[1] = np.maximum(det[1] - args.margin / 2, 0)\n bb[2] = np.minimum(det[2] + args.margin / 2, img_size[1])\n bb[3] = np.minimum(det[3] + args.margin / 2, img_size[0])\n cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]\n if args.resize:\n try:\n scaled = misc.imresize(\n cropped,\n (args.image_size, args.image_size),\n interp='bilinear')\n except ValueError: # todo: fix this\n continue\n else:\n scaled = cropped\n nrof_successfully_aligned += 1\n filename_base, file_extension = os.path.splitext(output_filename)\n if args.detect_multiple_faces:\n output_filename_n = \"{}_{}{}\".format(filename_base, i, file_extension)\n else:\n output_filename_n = \"{}{}\".format(filename_base, file_extension)\n misc.imsave(output_filename_n, scaled)\n text_file.write('%s %d %d %d %d\\n' % (\n output_filename_n, bb[0], bb[1], bb[2],\n bb[3]))\n else:\n pprint('Unable to align \"%s\"' % image_path)\n text_file.write('%s\\n' % output_filename)\n\n end = time()\n pprint('Total number of images: %d' % nrof_images_total)\n pprint('Number of successfully aligned images: %d' % nrof_successfully_aligned)\n pprint('time cost: {}'.format(end - start))\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('input_dir', type=str,\n help='Directory with unaligned images.')\n parser.add_argument('output_dir', type=str,\n help='Directory with aligned face thumbnails.')\n parser.add_argument('--image_size', type=int,\n help='Image 
size (height, width) in pixels. 182 default',\n default=182)\n parser.add_argument('--parallelism', type=int,\n help='number of Parallel processing',\n default=multiprocessing.cpu_count())\n parser.add_argument('--resize',\n help='turn on/off resize',\n action='store_true')\n parser.add_argument('--margin', type=int,\n help='Margin for the crop around the bounding box (height, width) in pixels.',\n default=44)\n parser.add_argument('--min_face_size', type=int,\n help='minimal face size',\n default=20)\n # parser.add_argument('--random_order',\n # help='Shuffles the order of images to enable alignment using multiple processes.',\n # action='store_true')\n parser.add_argument('--gpu_memory_fraction', type=float,\n help='Upper bound on the amount of GPU memory that will be used by the process.',\n default=1.0)\n parser.add_argument('--detect_multiple_faces',\n help='Detect and align multiple faces per image.',\n action='store_true')\n parser.add_argument('--skip_multiple_faces',\n help='skip align if multiple faces',\n action='store_true')\n parser.add_argument('--warn_multiple_faces',\n help='warn if multiple faces are detected in image',\n action='store_true')\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.minimum", "tensorflow.Graph", "scipy.misc.imresize", "scipy.misc.imread", "tensorflow.ConfigProto", "numpy.vstack", "numpy.argmax", "numpy.power", "numpy.maximum", "numpy.squeeze", "tensorflow.GPUOptions", "scipy.misc.imsave", "numpy.array_split" ] ]
googlr/Parallelograms-Detection
[ "7e53b809955973fbe4f2c535fce6019cc52072b2" ]
[ "pd.py" ]
[ "# -*- coding: utf-8 -*-\n\n# The program will consist of three steps:\n#\t(1) detect edges using the Sobel’s operator,\n#\t(2) detect straight line segments using the Hough Transform, and\n#\t(3) detect parallelograms from the straight-line segments detected in step (2).\n# In step (1), compute edge magnitude using the formula below and\n# then normalize the magnitude values to lie within the range [0,255].\n# Next, manually choose a threshold value to produce a binary edge map.\n\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pylab as plt\nimport math\nimport itertools as it\n# import cv2\n# from skimage.feature import peak_local_max\nfrom photutils_detection_core import find_peaks\n# import sys\n\n\nrow, col = 756, 1008 # Size of TestImage 1 and 2\n# row, col = 413, 550 # Size of TestImage 3\nfilename = \"TestImage1.raw\"\nT = 25 # Threshold in the normalized gradient magnitue\nCanny_Edge_Detector_threshold = 10\nlocal_maxima_window_size = 3 # neighborhood_size\n# the de-Houghed image (using a relative threshold of 40%)\nrelative_threshold_ratio = 0.4\ndistance_threshold = 8 # Threshold distance to determin if a point on a line\n\n# the least points on line to be considered to be a valid line\npoints_on_line_threshold = 20\n\n# convert them into grayscale images by using the formula\n# luminance = 0.30R + 0.59G + 0.11B,\n# where R, G, and B, are the red, green, and blue components.\n\n\ndef cvt2grayscale(img):\n grayImage = []\n for i in range(0, img.size // 3):\n luminance = int(0.3 * img[3 * i] + 0.59 *\n img[3 * i + 1] + 0.11 * img[3 * i + 2])\n grayImage.append(luminance)\n\n return np.array(grayImage)\n\n# Gausssion smoothing: https://homepages.inf.ed.ac.uk/rbf/HIPR2/gsmooth.htm\n\n\ndef smooth_image_with_Gaussian_filter(img):\n kernel = (0.006, 0.061, 0.242, 0.383, 0.242, 0.061, 0.006)\n kernel_size = len(kernel)\n border_offset = (kernel_size - 1) // 2\n\n img_copy = np.copy(img)\n for i in range(0, row):\n # Keep border values as they are\n for j in range(border_offset, col - border_offset):\n img_copy_ij = 0\n for k in range((-1) * border_offset, border_offset + 1):\n img_copy_ij += img[i][j + k] * kernel[border_offset + k]\n img_copy[i][j] = img_copy_ij\n\n img_copy_copy = np.copy(img_copy)\n # Keep border values as they are\n for i in range(border_offset, row - border_offset):\n for j in range(0, col):\n img_copy_copy_ij = 0\n for k in range((-1) * border_offset, border_offset + 1):\n img_copy_copy_ij += img_copy[i +\n k][j] * kernel[border_offset + k]\n img_copy_copy[i][j] = img_copy_copy_ij\n\n return img_copy_copy\n\n\ndef sobels_operator(img):\n mag = []\n img_row, img_col = img.shape\n for i in range(1, img_row - 1):\n for j in range(1, img_col - 1):\n # compute edge magnitude using the formula\n g_x = (img[i - 1][j + 1] + 2 * img[i][j + 1] + img[i + 1][j + 1]\n - img[i - 1][j - 1] - 2 * img[i][j - 1] - img[i + 1][j - 1])\n g_y = (img[i - 1][j - 1] + 2 * img[i - 1][j] + img[i - 1][j + 1]\n - img[i + 1][j - 1] - 2 * img[i + 1][j] - img[i + 1][j + 1])\n mag_i_j = math.sqrt(g_x * g_x + g_y * g_y)\n mag.append(int(mag_i_j))\n\n # normalize the magnitude values to lie within the range [0,255].\n min_mag = min(mag)\n max_mag = max(mag)\n mag_normalized = []\n for val in mag:\n mag_normalized.append(int((val - min_mag) * 255 / (max_mag - min_mag)))\n # Save the normalized gradient magnitue\n normalized_gradient_magnitue_as_a_image = np.array(\n mag_normalized).reshape([img_row - 2, img_col - 2])\n plt.imshow(normalized_gradient_magnitue_as_a_image, 
cmap='gray')\n # plt.show()\n plt.savefig(\"normalized_gradient_magnitue_as_a_image.png\")\n plt.close()\n\n # filter: threshold T=225\n mag_normalized_filtered = []\n for val in mag_normalized:\n mag_normalized_filtered.append(val if val >= T else 0)\n #mag_i_j = mag_i_j if mag_i_j >= 225 else 0\n #mag_i_j = mag_i_j if mag_i_j <= 225 else 255\n return np.array(mag_normalized_filtered).reshape([img_row - 2, img_col - 2])\n\n\n# Read Image\ntestImage = np.fromfile(filename, dtype='uint8', sep=\"\")\n\n# Convert to grayscale image\ngrayImage = cvt2grayscale(testImage).reshape([row, col])\nprint(\"Step 1: Convert image to grayscale.\")\n# print grayImage.shape\n\n# Smooth_image_with_Gaussian_filter\ngrayImage_smoothed = smooth_image_with_Gaussian_filter(grayImage)\n# Display Image\nplt.imshow(grayImage_smoothed, cmap='gray')\n# plt.show()\nplt.savefig(\"grayImage_smoothed_with_Gaussian_filter.png\")\nplt.close()\n\n# Compute gradient magnitude and gradient angle\ngradient_magnitude = np.zeros((row, col), dtype='uint8')\ngradient_angle = np.zeros((row, col), dtype='uint8')\nquantize_angle_of_the_gradient = np.zeros((row, col), dtype='uint8')\n\n\ndef quantize_angle_of_the_gradient_to_four_sectors(angle):\n # Double check the parameter\n if (angle < 0 or angle > 360):\n print(\"Warning: invalid parameter in quantize_angle_of_the_gradient_to_four_sectors(angle).\")\n return 4\n if (angle <= 0 + 22.5 or\n (angle >= 180 - 22.5 and angle <= 180 + 22.5) or\n angle >= 315 + 22.5):\n return 0\n if ((angle > 45 - 22.5 and angle < 45 + 22.5) or\n (angle > 225 - 22.5 and angle < 225 + 22.5)):\n return 1\n if ((angle >= 90 - 22.5 and angle <= 90 + 22.5) or\n (angle >= 270 - 22.5 and angle <= 270 + 22.5)):\n return 2\n if ((angle > 135 - 22.5 and angle < 135 + 22.5) or\n (angle > 315 - 22.5 and angle < 315 + 22.5)):\n return 3\n\n\ndef compute_gradient_magnitude_and_gradient_angle(image_smoothed):\n for i in range(1, row):\n for j in range(1, col):\n Gx = (image_smoothed[i][j] + image_smoothed[i - 1][j]\n - image_smoothed[i][j - 1] - image_smoothed[i - 1][j - 1])\n Gy = (image_smoothed[i - 1][j - 1] + image_smoothed[i - 1][j]\n - image_smoothed[i][j - 1] - image_smoothed[i][j])\n gradient_magnitude[i][j] = math.sqrt(Gx * Gx + Gy * Gy)\n if Gx == 0:\n gradient_angle[i][j] = 90 if Gy > 0 else 270\n else:\n gradient_angle[i][j] = math.degrees(math.atan2(Gy, Gx))\n\n quantize_angle_of_the_gradient[i][\n j] = quantize_angle_of_the_gradient_to_four_sectors(gradient_angle[i][j])\n\ncompute_gradient_magnitude_and_gradient_angle(grayImage_smoothed)\n# Non-maxima Suppression\n# \tThin magnitude image by using a 3×3 window\nfor i in range(1, row - 1):\n for j in range(1, col - 1):\n sector_ij = quantize_angle_of_the_gradient[i][j]\n if sector_ij == 0:\n gradient_magnitude[i][j] = gradient_magnitude[i][j] if (gradient_magnitude[i][j] >= gradient_magnitude[\n i][j - 1] and gradient_magnitude[i][j] >= gradient_magnitude[i][j + 1]) else 0\n elif sector_ij == 1:\n gradient_magnitude[i][j] = gradient_magnitude[i][j] if (gradient_magnitude[i][j] >= gradient_magnitude[\n i - 1][j + 1] and gradient_magnitude[i][j] >= gradient_magnitude[i + 1][j - 1]) else 0\n elif sector_ij == 2:\n gradient_magnitude[i][j] = gradient_magnitude[i][j] if (gradient_magnitude[i][j] >= gradient_magnitude[\n i - 1][j] and gradient_magnitude[i][j] >= gradient_magnitude[i + 1][j]) else 0\n elif sector_ij == 3:\n gradient_magnitude[i][j] = gradient_magnitude[i][j] if (gradient_magnitude[i][j] >= gradient_magnitude[\n i - 1][j - 1] and 
gradient_magnitude[i][j] >= gradient_magnitude[i + 1][j + 1]) else 0\n else:\n print(\"Warning: invalid sector in Non-maxima Suppression.\")\n\nfor i in range(1, row - 1):\n for j in range(1, col - 1):\n gradient_magnitude[i][j] = gradient_magnitude[i][\n j] if gradient_magnitude[i][j] >= Canny_Edge_Detector_threshold else 0\n\nprint(\"Step 2: Canny Edge Detecter applied.\")\nplt.imshow(gradient_magnitude, cmap='gray')\n# plt.show()\nplt.savefig(\"edges_detected_by_Canny_Edge_Detector.png\")\nplt.close()\n\n\n#################################################################\n#(1) detect edges using the Sobel’s operator\n#– Filtering\n#– Enhancement\n# imgMag = sobels_operator(grayImage)\n# print(\"Step 2: Sobel's operator applied.\")\n# plt.imshow(imgMag, cmap = 'gray')\n# #plt.show()\n# plt.savefig(\"edges_detected_in_image.png\")\n# plt.close()\n\nimgMag = gradient_magnitude\n\n#################################################################\n#(2) detect straight line segments using the Hough Transform\ntheta_step_size = 3\np_step_size = 1\ntheta_MAX_VALUE = 360\np_MAX_VALUE = int(math.sqrt(row * row + col * col))\naccumulator_array = np.zeros(\n (theta_MAX_VALUE // theta_step_size + 1, p_MAX_VALUE // p_step_size + 1), dtype='uint8')\n# Compute the accumulator array\nimgMag_row, imgMag_col = imgMag.shape\nfor i in range(0, imgMag_row):\n for j in range(0, imgMag_col):\n if(imgMag[i][j] > 0):\n # p = x*cos(theta) + y*sin(theta)\n theta = 0\n while theta < 360:\n theta_radians = math.radians(theta + theta_step_size / 2.0)\n p_estimate = i * math.cos(theta_radians) + \\\n j * math.sin(theta_radians)\n # Update the accumulator array\n accu_x = theta // theta_step_size\n accu_y = int(p_estimate / p_step_size)\n accumulator_array[accu_x][accu_y] += 1\n # next theta\n theta = theta + theta_step_size\n\nmax_accumulator = np.amax(accumulator_array)\nprint(max_accumulator)\nprint(\"Step 3: Hough Transform applied.\")\n# plt.imshow(accumulator_array, cmap='gray')\n# plt.show()\n# plt.close()\n\n\n#################################################################\n#(3) detect parallelograms from the straight-line segments detected in step (2).\n# the de-Houghed image (using a relative threshold of 40%)\naccu_row, accu_col = accumulator_array.shape\npeak_list = []\n\n# Relative threshold filtering\nrelative_threshold = max_accumulator * relative_threshold_ratio\nfor i in range(0, accu_row):\n for j in range(0, accu_col):\n # apply the threshold filter\n accumulator_i_j = accumulator_array[i][j]\n accumulator_array[i][\n j] = accumulator_i_j if accumulator_i_j >= relative_threshold else 0\n # if accumulator_i_j >= relative_threshold:\n # \tpeak_p = (j + 0.5) * p_step_size\n # \tpeak_theta = (i + 0.5) * theta_step_size\n # \tpeak_list.append([peak_theta, peak_p])\n\n# plt.imshow(accumulator_array, cmap='gray')\n# plt.show()\n# plt.close()\n\ntable = find_peaks(accumulator_array, relative_threshold)\n# print(table)\npeaks_found = []\nfor i in range(0, len(table[0])):\n table_x = table[1][i]\n table_y = table[0][i]\n peaks_found.append([(table_x + 0.5) * theta_step_size,\n (table_y + 0.5) * p_step_size])\n # print( accumulator_array[ table_x ][ table_y ] )\n\nprint(peaks_found)\n\n\n# Using local-maxima threshold\n# \tWith a threshold window of 3x3\nwindow_size = local_maxima_window_size\n\n\ndef xy_in_range_of_accumulator_array(x, y):\n accu_arr_row, accu_arr_col = accumulator_array.shape\n return True if (x >= 0 and x < accu_arr_row and y >= 0 and y < accu_arr_col) else False\n\n\ndef 
accumulator_is_local_maxima(i, j):\n if accumulator_array[i][j] == 0: # already surpressed\n return False\n for s_i in range((-1) * window_size, window_size + 1):\n for s_j in range((-1) * window_size, window_size + 1):\n local_x = i + s_i\n local_y = j + s_j\n if xy_in_range_of_accumulator_array(local_x, local_y):\n # Notice that there might be more than one maxima\n if accumulator_array[i][j] < accumulator_array[local_x][local_y]:\n return False\n return True\n\nfor i in range(0, accu_row):\n for j in range(0, accu_col):\n # apply the threshold filter\n if accumulator_is_local_maxima(i, j):\n peak_p = (j + 0.5) * p_step_size\n peak_theta = (i + 0.5) * theta_step_size\n peak_list.append([peak_theta, peak_p])\n\n\n# def accumulator_is_local_maxima( i, j ):\n# \tif accumulator_array[i][j] == 0: # already surpressed\n# \t\treturn False\n# \tfor s_i in range( (-1)*window_size, window_size + 1 ):\n# \t\tfor s_j in range( (-1)*window_size, window_size + 1 ):\n# \t\t\tif accumulator_array[i][j] < accumulator_array[ i + s_i ][ j + s_j ]: # Notice that there might be more than one maxima\n# \t\t\t\treturn False\n# \treturn True\n\n# for i in range( window_size, accu_row - window_size):\n# \tfor j in range( window_size, accu_col - window_size):\n# \t\t#apply the threshold filter\n# \t\tif accumulator_is_local_maxima( i, j ):\n# \t\t\tpeak_p = (j + 0.5) * p_step_size\n# \t\t\tpeak_theta = (i + 0.5) * theta_step_size\n# \t\t\tpeak_list.append([peak_theta, peak_p])\n\nprint(\"peak_list: \")\nprint(peak_list)\n\npeak_list = peaks_found\n\n##########################################################################\n# Filter overlaping lines\nfilter_step_size = theta_step_size\n\n# Compute average of a list of int\n\n\ndef average_p(p_filter_list):\n list_len = len(p_filter_list)\n if list_len == 0:\n print(\"Warning: empty list.\")\n p_sum = 0.0\n for p in p_filter_list:\n p_sum = p_sum + p\n\n return p_sum / list_len\n\n# Cluster a list of int to clustered list\n\n\ndef cluster_list(p_list):\n p_list = sorted(p_list)\n list_len = len(p_list)\n clustered_list = []\n if list_len == 0:\n return clustered_list\n p_val = p_list[0]\n p_filter_list = []\n for i in range(0, list_len):\n if math.fabs(p_val - p_list[i]) < filter_step_size:\n p_filter_list.append(p_list[i])\n else:\n p_new_average = average_p(p_filter_list)\n clustered_list.append(p_new_average)\n # update p_val and clear p_filter_list\n p_val = p_list[i]\n p_filter_list[:] = []\n p_filter_list.append(p_list[i])\n\n # clear p_filter_list\n if len(p_filter_list) != 0:\n p_new_average = average_p(p_filter_list)\n clustered_list.append(p_new_average)\n return clustered_list\n\n\n# Use dictionary to filter peaks\npeak_dict = {}\nfor line in peak_list:\n if line[0] in peak_dict:\n # append the new number to the existing array at this slot\n peak_dict[line[0]].append(line[1])\n else:\n # create a new array in this slot\n peak_dict[line[0]] = [line[1]]\n\nfor key in peak_dict:\n peak_dict[key] = cluster_list(peak_dict[key])\n\npeak_list_filtered = []\nfor key in peak_dict:\n for val in peak_dict[key]:\n peak_list_filtered.append([key, val])\n\nprint(\"peak_list_filtered: \")\nprint(peak_list_filtered)\npeak = np.array(peak_list_filtered)\n\n##########################################################################\n# print(peak)\nedge_map = np.zeros((row, col), dtype='uint8')\n# Initialize to edge map to 255\nfor i in range(0, row):\n for j in range(0, col):\n edge_map[i][j] = 255\n\n# Copy the magnitude array imgMag to edge_map\nfor i in range(0, row - 
2):\n for j in range(0, col - 2):\n if imgMag[i][j] > 0:\n edge_map[i + 1][j + 1] = 0\n\n\ndef xy_in_range(x, y):\n return True if (x >= 0 and x < row and y >= 0 and y < col) else False\n\n\ndef draw_line(i_theta, i_p):\n # Draw the lines in edge_map\n i_theta_radians = math.radians(i_theta)\n if (i_theta == 0 or i_theta == 180):\n i_x = i_p / math.cos(i_theta_radians)\n for j in range(0, col):\n if xy_in_range(i_x, j):\n edge_map[i_x][j] = 0\n else:\n for i_x in range(0, row):\n i_y = int((i_p - i_x * math.cos(i_theta_radians)) /\n math.sin(i_theta_radians))\n if xy_in_range(i_x, i_y):\n edge_map[i_x][i_y] = 0\n\n\n# Draw the lines in edge_map\n# print(\"Peak includes:\")\n# print( peak )\nfor line in peak_list_filtered:\n draw_line(line[0], line[1])\nplt.imshow(edge_map, cmap=\"gray\")\n# plt.show()\nplt.savefig(\"image_with_all_straight_lines_detected.png\")\nplt.close()\n\n\n# sys.exit()\n\n##########################################################################\n# Extract line segments\n\ndef get_bias_key_list_in_peak_dict(key):\n bias_keys = [key]\n peak_dict_keys = peak_dict.keys()\n key1 = key\n while (key1 + theta_step_size) in peak_dict_keys:\n key1 = key1 + theta_step_size\n bias_keys.append(key1)\n\n key2 = key\n while (key2 - theta_step_size) in peak_dict_keys:\n key2 = key2 - theta_step_size\n bias_keys.append(key2)\n\n return bias_keys\n\n# Correct bias of in Theta by allowing fluctions in theta when generating\n# parallel line pairs\nparallel_peak_dict = {}\nfor key in peak_dict:\n bias_key_list = get_bias_key_list_in_peak_dict(key)\n # Use the min_key to represent the similar keys\n min_key = min(bias_key_list)\n parallel_peak_dict[min_key] = []\n for bias_key in bias_key_list:\n bias_key_val_list = peak_dict[bias_key]\n for bias_key_val in bias_key_val_list:\n parallel_peak_dict[min_key].append((bias_key, bias_key_val))\n\n# print(\"parallel_peak_dict:\")\n# print( parallel_peak_dict )\n# Compute possible parallelogram options\npara_gram_options = []\npara_keys = list(it.combinations(parallel_peak_dict.keys(), 2))\nfor key in para_keys:\n key1, key2 = key\n key1_list = list(it.combinations(parallel_peak_dict[key1], 2))\n key2_list = list(it.combinations(parallel_peak_dict[key2], 2))\n for comb1 in key1_list:\n for comb2 in key2_list:\n theta1 = comb1[0][0]\n p1 = comb1[0][1]\n theta2 = comb1[1][0]\n p2 = comb1[1][1]\n theta3 = comb2[0][0]\n p3 = comb2[0][1]\n theta4 = comb2[1][0]\n p4 = comb2[1][1]\n para_gram_options.append(\n (theta1, p1, theta2, p2, theta3, p3, theta4, p4))\n\n# print(\"para_gram_options:\")\n# print( para_gram_options )\n\n# Compute valid parallelogram\n\n# Get a copy of imgMag\nmag_map_copy = np.zeros((row, col), dtype='uint8')\n# Initialize to edge map to 255\nfor i in range(0, row):\n for j in range(0, col):\n mag_map_copy[i][j] = 255\n# Copy the magnitude array imgMag to mag_map_copy\nfor i in range(0, row - 2):\n for j in range(0, col - 2):\n if imgMag[i][j] > 0:\n mag_map_copy[i + 1][j + 1] = 0\n\n\ndef sketch_dot_on_map(x, y, dot_map, sketch_val):\n dot_size = 5\n if xy_in_range(x, y):\n for i in range((-1) * dot_size, dot_size + 1):\n for j in range((-1) * dot_size, dot_size + 1):\n x_ij = i + x # (x,y) with offset i, j\n y_ij = j + y\n if xy_in_range(x_ij, y_ij):\n # print(\"sketch\")\n dot_map[int(x_ij)][int(y_ij)] = sketch_val\n\n# Compute the intersection of two lines\n\n\ndef intersection(theta1, p1, theta2, p2):\n theta1_radians = math.radians(theta1)\n theta2_radians = math.radians(theta2)\n x = (p2 * math.sin(theta1_radians) - p1 * 
math.sin(theta2_radians)) / \\\n math.sin(theta1_radians - theta2_radians)\n y = (p1 * math.cos(theta2_radians) - p2 * math.cos(theta1_radians)) / \\\n math.sin(theta1_radians - theta2_radians)\n # test_sketch_dot(x,y) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n return [x, y]\n\n\ndef get_y_from_x(i_theta, i_p, i_x):\n i_theta_radians = math.radians(i_theta)\n if (i_theta == 0 or i_theta == 180):\n return 0 # special case, but does not matter\n else:\n i_y = int((i_p - i_x * math.cos(i_theta_radians)) /\n math.sin(i_theta_radians))\n return i_y\n\n\ndef near_edge_line(x, y):\n if xy_in_range(x, y):\n for i in range((-1) * distance_threshold, distance_threshold + 1):\n for j in range((-1) * distance_threshold, distance_threshold + 1):\n x_ij = i + x # (x,y) with offset i, j\n y_ij = j + y\n if xy_in_range(x_ij, y_ij):\n if mag_map_copy[x_ij][y_ij] == 0:\n return True\n else:\n continue\n return False\n else:\n return False\n\n# Count the number of point on (theta1, p1_1) restricted by (theta2, p2_1)\n# and (theta2, p2_2)\n\n\ndef counting_points_on_line_segment(theta1, p1, theta3, p3, theta4, p4):\n x1, y1 = intersection(theta1, p1, theta3, p3)\n x2, y2 = intersection(theta1, p1, theta4, p4)\n # test_sketch_dot(x1,y1)\n # test_sketch_dot(x2,y2)\n # plt.imshow(mag_map_copy, cmap='gray')\n # plt.show()\n # plt.close()\n # print([x1,y1,x2,y2])\n points_count = 0\n if xy_in_range(x1, y1) and xy_in_range(x2, y2):\n start_x = int(min(x1, x2))\n end_x = int(max(x1, x2))\n for x in range(start_x, end_x):\n y = get_y_from_x(theta1, p1, x)\n if near_edge_line(x, y):\n points_count = points_count + 1\n return points_count\n else:\n return 0\n\n\ndef draw_parallelogram(line):\n draw_line(line[0], line[1])\n draw_line(line[2], line[3])\n draw_line(line[4], line[5])\n draw_line(line[6], line[7])\n\n\ndef valid_parallelogram(line):\n # print(\"Validating parallelogram:\")\n # print( line )\n # draw_parallelogram( line )\n # plt.imshow(edge_map, cmap='gray')\n # plt.show()\n # plt.close()\n\n if len(line) != 8:\n print(\"Warning: invalid data in valid_parallelogram().\")\n theta1 = line[0]\n p1 = line[1]\n theta2 = line[2]\n p2 = line[3]\n theta3 = line[4]\n p3 = line[5]\n theta4 = line[6]\n p4 = line[7]\n points_line1 = counting_points_on_line_segment(\n theta1, p1, theta3, p3, theta4, p4)\n # draw_line( theta1,p1 )\n # draw_line( theta3,p3 )\n # draw_line( theta4,p4 )\n # plt.imshow(edge_map, cmap='gray')\n # plt.show()\n # plt.close()\n # print(\"Points on line:\")\n # print( points_line1 )\n points_line2 = counting_points_on_line_segment(\n theta2, p2, theta3, p3, theta4, p4)\n points_line3 = counting_points_on_line_segment(\n theta3, p3, theta1, p1, theta2, p2)\n points_line4 = counting_points_on_line_segment(\n theta4, p4, theta1, p1, theta2, p2)\n\n if points_line1 > points_on_line_threshold and points_line2 > points_on_line_threshold and points_line3 > points_on_line_threshold and points_line4 > points_on_line_threshold:\n return points_line1 + points_line2 + points_line3 + points_line4\n else: # There is no enough points on at least one line segment\n return 0\n\n# Mask of parallelograms\n# 1 is not on parallelograms, 0 is on parallelograms\nmask_parallelogram = np.ones((row, col), dtype='uint8')\n\n\n# add line mask from (x1,y1) to (x2,y2) on line( i_theta, i_p)\ndef add_line_mask(i_theta, i_p, x1, y1, x2, y2):\n x_min = int(min([x1, x2]))\n x_max = int(max([x1, x2]))\n # Draw the lines in mask\n i_theta_radians = math.radians(i_theta)\n if (i_theta == 0 or i_theta == 180): # x1 == x2\n y_min = 
int(min([y1, y2]))\n y_max = int(max([y1, y2]))\n i_x = x_min\n for j in range(y_min, y_max + 1):\n if xy_in_range(i_x, j):\n mask_parallelogram[i_x][j] = 0\n else:\n for i_x in range(x_min, x_max):\n i_y = int((i_p - i_x * math.cos(i_theta_radians)) /\n math.sin(i_theta_radians))\n if xy_in_range(i_x, i_y):\n mask_parallelogram[i_x][i_y] = 0\n\n\ndef add_parallelogram_mask(line):\n theta1 = line[0]\n p1 = line[1]\n theta2 = line[2]\n p2 = line[3]\n theta3 = line[4]\n p3 = line[5]\n theta4 = line[6]\n p4 = line[7]\n x1, y1 = intersection(theta1, p1, theta3, p3)\n x2, y2 = intersection(theta2, p2, theta3, p3)\n x3, y3 = intersection(theta2, p2, theta4, p4)\n x4, y4 = intersection(theta1, p1, theta4, p4)\n add_line_mask(theta3, p3, x1, y1, x2, y2)\n add_line_mask(theta2, p3, x2, y2, x3, y3)\n add_line_mask(theta4, p4, x3, y3, x4, y4)\n add_line_mask(theta1, p1, x4, y4, x1, y1)\n # Sketch on end points\n sketch_dot_on_map(x1, y1, mask_parallelogram, 0)\n sketch_dot_on_map(x2, y2, mask_parallelogram, 0)\n sketch_dot_on_map(x3, y3, mask_parallelogram, 0)\n sketch_dot_on_map(x4, y4, mask_parallelogram, 0)\n # Print end points\n print([x1, y1, x2, y2, x3, y3, x4, y4])\n\n\nvalid_parallelogram_list = []\npoints_on_parallelogram = []\nprint(\"Length of para_gram_options:\")\nprint(len(para_gram_options))\nfor line in para_gram_options:\n points_on_line = valid_parallelogram(line)\n points_on_parallelogram.append(points_on_line)\n if points_on_line > 0:\n #draw_parallelogram( line )\n add_parallelogram_mask(line)\n # plt.imshow(edge_map, cmap='gray')\n # plt.show()\n # plt.close()\n\n #\tvalid_parallelogram_list.append( line )\n\n# print(\"Points_on_parallelogram = \")\n# print( points_on_parallelogram )\n\n# Use the mask\nmasked_image_list = []\nfor i in range(0, row):\n for j in range(0, col):\n # Use the mask\n masked_image_list.append(grayImage[i][j] * mask_parallelogram[i][j])\n # masked_image_list.append( testImage[ i*col + j*3 + 0 ] * mask_parallelogram[i][j] )\n # masked_image_list.append( testImage[ i*col + j*3 + 1 ] * mask_parallelogram[i][j] )\n # masked_image_list.append( testImage[ i*col + j*3 + 2 ] * mask_parallelogram[i][j] )\n\n\nmaskedImage = np.array(masked_image_list).reshape([row, col])\n\n# plt.imshow(maskedImage, cmap = \"gray\")\n# #plt.show()\n# plt.savefig(\"maskedImage.png\")\n# plt.close()\nmatplotlib.image.imsave('maskedImage.png', maskedImage)\n\n\n# Saving filtered image to new file\n" ]
[ [ "numpy.array", "matplotlib.pylab.savefig", "numpy.zeros", "numpy.copy", "numpy.ones", "matplotlib.pylab.close", "numpy.amax", "numpy.fromfile", "matplotlib.image.imsave", "matplotlib.pylab.imshow" ] ]
fujingguo/tf_adapter_npu
[ "96e796fca0359b984a8504f920844ae572b5d30e" ]
[ "tf_adapter/python/npu_bridge/estimator/npu_ops.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright (c) Huawei Technologies Co., Ltd. 2019-2021. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Ops for collective operations implemented using hccl.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numbers\n\nfrom tensorflow.python.ops.nn_ops import _get_noise_shape\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.eager import context\n\nfrom npu_bridge.helper import helper\nfrom npu_bridge.estimator.npu.npu_common import NPUBasics\n\ngen_npu_ops = helper.get_gen_ops()\n\nDEFAULT_GRAPH_SEED = 87654321\n_MAXINT32 = 2 ** 31 - 1\n\n\ndef NPUInit(name=None):\n \"\"\"Initiate NPU\"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.NPUInit() is not compatible with \"\n \"eager execution.\")\n\n return gen_npu_ops.npu_init(name=name)\n\n\ndef NPUShutdown(name=None):\n \"\"\"Shutdown a distributed NPU system for use with TensorFlow.\n\n Args:\n name: Name of ops.\n\n Returns:\n The npu init ops which will open the NPU system using `Session.run`.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.NPUShutdown() is not compatible with \"\n \"eager execution.\")\n\n return gen_npu_ops.npu_shutdown(name=name)\n\n\ndef initialize_system(name=None):\n \"\"\"Initializes a distributed NPU system for use with TensorFlow.\n\n Args:\n name: Name of ops.\n\n Returns:\n The npu init ops which will open the NPU system using `Session.run`.\n \"\"\"\n return NPUInit(name)\n\n\ndef shutdown_system(name=None):\n \"\"\"Shuts down a running NPU system.\"\"\"\n\n return NPUShutdown(name)\n\n\ndef LARS(inputs_w, inputs_g, weight_decay, hyperpara=0.001, epsilon=0.00001, name=None):\n \"\"\"NPU implemented LARS\"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.LARS() is not compatible with \"\n \"eager execution.\")\n\n return gen_npu_ops.lars(inputs_w=inputs_w, inputs_g=inputs_g, weight_decay=weight_decay, hyperpara=hyperpara,\n epsilon=epsilon, name=name)\n\n\ndef LARSV2(input_weight,\n input_grad,\n weight_decay,\n learning_rate,\n hyperpara=0.001,\n epsilon=0.00001,\n use_clip=False,\n name=None):\n \"\"\"NPU implemented LARSV2\"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.LARSV2() is not compatible with \"\n \"eager execution.\")\n\n return gen_npu_ops.lars_v2(input_weight=input_weight,\n input_grad=input_grad,\n weight_decay=weight_decay,\n learning_rate=learning_rate,\n hyperpara=hyperpara,\n epsilon=epsilon,\n use_clip=use_clip,\n name=name)\n\n\ndef outfeed_dequeue_op(channel_name, output_types, output_shapes, name=None):\n \"\"\"Operator for outfeed dequeue\"\"\"\n return gen_npu_ops.outfeed_dequeue_op(channel_name=channel_name, output_types=output_types,\n output_shapes=output_shapes, name=name)\n\n\ndef 
outfeed_enqueue_op(channel_name, inputs, name=None):\n \"\"\"Operator for outfeed enqueue\"\"\"\n return gen_npu_ops.outfeed_enqueue_op(inputs=inputs, channel_name=channel_name, name=name)\n\n\ndef stop_outfeed_dequeue_op(channel_name, name=None):\n \"\"\"Operator for stoping outfeed dequeue\"\"\"\n return gen_npu_ops.stop_outfeed_dequeue_op(channel_name, name)\n\n\ndef _truncate_seed(seed):\n return seed % _MAXINT32 # Truncate to fit into 32-bit integer\n\n\ndef dropout(x, keep_prob, noise_shape=None, seed=None, name=None):\n \"\"\"The gradient for `gelu`.\n\n Args:\n x: A tensor with type is float.\n keep_prob: A tensor, float, rate of every element reserved.\n noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random\n generated.\n seed: Random seed.\n name: Layer name.\n\n Returns:\n A tensor.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"npu_ops.dropout() is not compatible with \"\n \"eager execution.\")\n x = ops.convert_to_tensor(x, name=\"x\")\n if not x.dtype.is_floating:\n raise ValueError(\"x must be a floating point tensor.\"\n \" Got a %s tensor instead.\" % x.dtype)\n if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1.0:\n raise ValueError(\"keep_prob must be a float value or a scalar tensor in the \"\n \"range (0, 1], got %g\" % keep_prob)\n if isinstance(keep_prob, float) and keep_prob == 1.0:\n return x\n seed, seed2 = random_seed.get_seed(seed)\n noise_shape = _get_noise_shape(x, noise_shape)\n gen_out = gen_npu_ops.drop_out_gen_mask(noise_shape, keep_prob, seed, seed2, name)\n result = gen_npu_ops.drop_out_do_mask(x, gen_out, keep_prob, name)\n return result\n\n\[email protected](\"DropOutDoMask\")\ndef _DropOutDoMaskGrad(op, grad):\n result = gen_npu_ops.drop_out_do_mask(grad, op.inputs[1], op.inputs[2])\n return [result, None, None]\n\n\ndef basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple,\n activation, name=None):\n \"\"\"NPU implemented lstm cell\"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.basic_lstm_cell() is not compatible with \"\n \"eager execution.\")\n x = ops.convert_to_tensor(x, name=\"x\")\n h = ops.convert_to_tensor(h, name=\"h\")\n c = ops.convert_to_tensor(c, name=\"c\")\n w = ops.convert_to_tensor(w, name=\"w\")\n b = ops.convert_to_tensor(b, name=\"b\")\n result = gen_npu_ops.basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple,\n activation, name)\n return result\n\n\[email protected](\"BasicLSTMCell\")\ndef basic_lstm_cell_grad(op, dct, dht, dit, djt, dft, dot, dtanhct):\n \"\"\"NPU implemented gradient for lstm cell\"\"\"\n dgate, dct_1 = gen_npu_ops.basic_lstm_cell_c_state_grad(op.inputs[2], dht, dct, op.outputs[2], op.outputs[3],\n op.outputs[4], op.outputs[5], op.outputs[6],\n forget_bias=op.get_attr(\"forget_bias\"),\n activation=op.get_attr(\"activation\"))\n dw, db = gen_npu_ops.basic_lstm_cell_weight_grad(op.inputs[0], op.inputs[1], dgate)\n dxt, dht = gen_npu_ops.basic_lstm_cell_input_grad(dgate, op.inputs[3], keep_prob=op.get_attr(\"keep_prob\"))\n\n return [dxt, dht, dct_1, dw, db]\n\n\ndef adam_apply_one_assign(input0, input1, input2, input3, input4,\n mul0_x, mul1_x, mul2_x, mul3_x, add2_y, name=None):\n \"\"\"NPU implemented adam_apply_one_assign\"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.adam_apply_one_assign() is not compatible with \"\n \"eager execution.\")\n result = gen_npu_ops.adam_apply_one_assign(input0, input1, input2, input3, input4,\n mul0_x, mul1_x, mul2_x, mul3_x, add2_y, name)\n return 
result\n\n\ndef adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4,\n mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name=None):\n \"\"\"NPU implemented adam_apply_one_with_decay_assign\"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.adam_apply_one_with_decay_assign() is not compatible with \"\n \"eager execution.\")\n result = gen_npu_ops.adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4,\n mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name)\n return result\n\n\[email protected](\"DynamicGruV2\")\ndef dynamic_gru_v2_grad(op, dy, doutput_h, dupdate, dreset, dnew, dhidden_new):\n \"\"\"NPU implemented dynamic_gru_v2\"\"\"\n (x, weight_input, weight_hidden, bias_input, bias_hidden, seq_length, init_h) = op.inputs\n (y, output_h, update, reset, new, hidden_new) = op.outputs\n (dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev) = gen_npu_ops.dynamic_gru_v2_grad(x, weight_input,\n weight_hidden, y, init_h,\n output_h, dy, doutput_h,\n update, reset, new,\n hidden_new,\n direction=op.get_attr(\n \"direction\"),\n cell_depth=op.get_attr(\n \"cell_depth\"),\n keep_prob=op.get_attr(\n \"keep_prob\"),\n cell_clip=op.get_attr(\n \"cell_clip\"),\n num_proj=op.get_attr(\n \"num_proj\"),\n time_major=op.get_attr(\n \"time_major\"),\n gate_order=op.get_attr(\n \"gate_order\"),\n reset_after=op.get_attr(\n \"reset_after\"))\n\n return (dx, dw_input, dw_hidden, db_input, db_hidden, seq_length, dh_prev)\n\n\[email protected](\"DynamicRnn\")\ndef dynamic_rnn_grad(op, dy, dh, dc, di, dj, df, do, dtanhc):\n \"\"\"NPU implemented dynamic_rnn_grad\"\"\"\n (x, w, b, seq_length, init_h, init_c) = op.inputs\n (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs\n (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h,\n output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc,\n cell_type=op.get_attr(\"cell_type\"),\n direction=op.get_attr(\"direction\"),\n cell_depth=op.get_attr(\"cell_depth\"),\n use_peephole=op.get_attr(\"use_peephole\"),\n keep_prob=op.get_attr(\"keep_prob\"),\n cell_clip=op.get_attr(\"cell_clip\"),\n num_proj=op.get_attr(\"num_proj\"),\n time_major=op.get_attr(\"time_major\"),\n forget_bias=op.get_attr(\"forget_bias\"))\n\n return (dx, dw, db, seq_length, dh_prev, dc_prev)\n\n\[email protected](\"DynamicRnnV2\")\ndef dynamic_rnn_v2_grad(op, dy, dh, dc, di, dj, df, do, dtanhc):\n \"\"\"NPU implemented dynamic_rnn_v2_grad\"\"\"\n (x, w, b, init_h, init_c) = op.inputs\n (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs\n (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h,\n output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc,\n cell_type=op.get_attr(\"cell_type\"),\n direction=op.get_attr(\"direction\"),\n cell_depth=op.get_attr(\"cell_depth\"),\n use_peephole=op.get_attr(\"use_peephole\"),\n keep_prob=op.get_attr(\"keep_prob\"),\n cell_clip=op.get_attr(\"cell_clip\"),\n num_proj=op.get_attr(\"num_proj\"),\n time_major=op.get_attr(\"time_major\"),\n forget_bias=op.get_attr(\"forget_bias\"))\n\n return (dx, dw, db, dh_prev, dc_prev)\n\n\ndef scatter_elements(data, indices, updates, axis=0, name=None):\n \"\"\"Scatter data based on indices\"\"\"\n data = ops.convert_to_tensor(data, name=\"data\")\n indices = ops.convert_to_tensor(indices, name=\"indices\")\n updates = ops.convert_to_tensor(updates, name=\"updates\")\n y = gen_npu_ops.scatter_elements(data, indices, updates, axis, name)\n return y\n\n\ndef k_means_centroids(x, y, 
sum_square_y, sum_square_x, use_actual_distance=False, name=None):\n \"\"\"k_means_centroids.\n\n Args:\n x: A tensor with type is float.\n y: A tensor with type is float.\n sum_square_y: A tensor with type is float.\n sum_square_x: A tensor with type is float or None.\n use_actual_distance: Whether to output accurate Loss\n name: name.\n\n Returns:\n A tensor.\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"tf.k_means_centroids() is not compatible with \"\n \"eager execution.\")\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\")\n sum_square_y = ops.convert_to_tensor(sum_square_y, name=\"sum_square_y\")\n if sum_square_x is not None:\n sum_square_x = ops.convert_to_tensor(sum_square_x, name=\"sum_square_x\")\n use_actual_distance = True\n else:\n use_actual_distance = False\n\n if use_actual_distance:\n result = gen_npu_ops.k_means_centroids(x, y, sum_square_y, sum_square_x, use_actual_distance, name)\n else:\n result = gen_npu_ops.k_means_centroids_v2(x, y, sum_square_y, use_actual_distance, name)\n return result\n\n\ndef npu_onnx_graph_op(inputs, tout, model_path, name=None):\n \"\"\"NPU implemented onnx graph operator\"\"\"\n output = gen_npu_ops.npu_onnx_graph_op(inputs, tout, model_path, name)\n return output\n" ]
[ [ "tensorflow.python.framework.random_seed.get_seed", "tensorflow.python.ops.nn_ops._get_noise_shape", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.eager.context.executing_eagerly" ] ]
pllim/halotools
[ "6499cff09e7e0f169e4f425ee265403f6be816e8", "6499cff09e7e0f169e4f425ee265403f6be816e8", "6499cff09e7e0f169e4f425ee265403f6be816e8" ]
[ "halotools/empirical_models/phase_space_models/analytic_models/satellites/nfw/conc_mass/dutton_maccio14.py", "halotools/empirical_models/phase_space_models/analytic_models/satellites/nfw/tests/test_biased_nfw/test_biased_nfw_phase_space.py", "halotools/empirical_models/phase_space_models/analytic_models/monte_carlo_helpers.py" ]
[ "\"\"\"\n\"\"\"\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\nimport numpy as np\n\n\n__all__ = ('dutton_maccio14', )\n\n\ndef dutton_maccio14(mass, redshift):\n r\"\"\" Power-law fit to the concentration-mass relation from\n Equations 12 & 13 of Dutton and Maccio 2014, arXiv:1402.7073.\n\n :math:`\\log_{10}c(M, z) \\equiv a + b\\log_{10}(M / M_{0}),`\n\n where :math:`a, b, M_{0}` are defined as follows:\n\n :math:`a = 0.537 + (1.025 - 0.537)\\exp(-0.718z^{1.08})`\n\n :math:`b = -0.097 + 0.024z`\n\n :math:`M_{0} = 10^{12}M_{\\odot}/h`\n\n Parameters\n ----------\n mass : array_like\n\n redshift : array_like\n\n Returns\n -------\n concentration : array_like\n\n Notes\n -----\n This model is based on virial mass definition and\n was only calibrated for the Planck 1-year cosmology.\n\n Examples\n --------\n >>> c = dutton_maccio14(1e12, 0)\n >>> c = dutton_maccio14(np.logspace(11, 15, 100), 0)\n \"\"\"\n\n a = 0.537 + (1.025 - 0.537) * np.exp(-0.718 * redshift**1.08)\n b = -0.097 + 0.024 * redshift\n m0 = 1.e12\n\n logc = a + b * np.log10(mass / m0)\n return 10**logc\n", "\"\"\" Explicit test that Monte Carlo realizations of BiasedNFWPhaseSpace\ndo indeed trace an NFW profile.\n\"\"\"\nimport numpy as np\n\nfrom ..test_nfw_profile import analytic_nfw_density_outer_shell_normalization\nfrom ..test_nfw_profile import monte_carlo_density_outer_shell_normalization\n\nfrom ...biased_nfw_phase_space import BiasedNFWPhaseSpace\n\n__all__ = ['test_mc_dimensionless_radial_distance']\n\nfixed_seed = 43\n\n\ndef test_mc_dimensionless_radial_distance():\n r\"\"\" Method used to test `~halotools.empirical_models.NFWPhaseSpace._mc_dimensionless_radial_distance`.\n\n Method uses the `~halotools.empirical_models.analytic_nfw_density_outer_shell_normalization` function\n and the `~halotools.empirical_models.monte_carlo_density_outer_shell_normalization` function\n to verify that the points returned by `~halotools.empirical_models.NFWPhaseSpace._mc_dimensionless_radial_distance`\n do indeed trace an NFW profile.\n\n \"\"\"\n conc_bins = np.array((5, 10, 15))\n gal_bias_bins = np.array((1, 2))\n nfw = BiasedNFWPhaseSpace(concentration_bins=conc_bins,\n conc_gal_bias_bins=gal_bias_bins)\n\n Npts = int(5e5)\n c5 = np.zeros(Npts) + 5\n c10 = np.zeros(Npts) + 10\n c15 = np.zeros(Npts) + 15\n\n r5 = nfw._mc_dimensionless_radial_distance(c5, 1, seed=43)\n r10 = nfw._mc_dimensionless_radial_distance(c10, 1, seed=43)\n r15 = nfw._mc_dimensionless_radial_distance(c15, 1, seed=43)\n\n assert np.all(r15 <= 1)\n assert np.all(r15 >= 0)\n assert np.all(r10 <= 1)\n assert np.all(r10 >= 0)\n assert np.all(r5 <= 1)\n assert np.all(r5 >= 0)\n\n assert np.mean(r15) < np.mean(r10)\n assert np.mean(r10) < np.mean(r5)\n assert np.median(r15) < np.median(r10)\n assert np.median(r10) < np.median(r5)\n\n num_rbins = 15\n rbins = np.linspace(0.05, 1, num_rbins)\n for r, c in zip([r5, r10, r15], [5, 10, 15]):\n rbin_midpoints, monte_carlo_ratio = (\n monte_carlo_density_outer_shell_normalization(rbins, r))\n analytical_ratio = (\n analytic_nfw_density_outer_shell_normalization(rbin_midpoints, c))\n assert np.allclose(monte_carlo_ratio, analytical_ratio, 0.05)\n", "\"\"\"\nThe `~halotools.empirical_models.MonteCarloGalProf` class defined in this module is\nused as an orthogonal mix-in class to supplement the behavior of\nthe analytic profile and velocity models.\nThe result of using `MonteCarloGalProf` as an orthogonal mix-in class\nis a composite class that can be used to generate Monte Carlo 
realizations\nof the full phase space distribution of galaxies within their halos.\n\"\"\"\n\nimport numpy as np\n\nfrom itertools import product\nfrom astropy.utils.misc import NumpyRNGContext\n\nfrom ...model_helpers import custom_spline, call_func_table\nfrom ... import model_defaults\n\nfrom ....custom_exceptions import HalotoolsError\n\n_epsilon = 0.001\n\n__author__ = ['Andrew Hearin']\n__all__ = ['MonteCarloGalProf']\n\n\nclass MonteCarloGalProf(object):\n r\"\"\" Orthogonal mix-in class used to turn an analytical\n phase space model (e.g., `~halotools.empirical_models.NFWPhaseSpace`)\n into a class that can generate the phase space distribution\n of a mock galaxy population.\n\n Notes\n ------\n In principle, this class can work with any analytical profile. In practice,\n the implementation here is based on building lookup tables to perform the\n inverse transformation sampling, and so the `MonteCarloGalProf` class\n will not be performant when used with models having more than two\n profile parameters.\n \"\"\"\n\n def __init__(self):\n r\"\"\"\n \"\"\"\n # For each function computing a profile parameter,\n # add it to new_haloprop_func_dict so that the profile parameter\n # will be pre-computed for each halo prior to mock population\n if not hasattr(self, 'new_haloprop_func_dict'):\n self.new_haloprop_func_dict = {}\n for key in self.halo_prof_param_keys:\n self.new_haloprop_func_dict[key] = getattr(self, key)\n\n self._galprop_dtypes_to_allocate = np.dtype([\n ('host_centric_distance', 'f8'),\n ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),\n ('vx', 'f8'), ('vy', 'f8'), ('vz', 'f8'),\n ])\n\n def setup_prof_lookup_tables(self, *lookup_table_binning_arrays):\n r\"\"\"\n Method used to set up the lookup table grid.\n\n Each analytical profile has profile parameters associated with it. 
This method\n sets up how we will digitize the value of each such parameter for the purposes of\n mock population.\n\n Parameters\n ----------\n *lookup_table_binning_arrays : sequence\n Sequence of arrays storing the bins for each profile parameter.\n \"\"\"\n\n for ipar, prof_param_key in enumerate(self.gal_prof_param_keys):\n arr = lookup_table_binning_arrays[ipar]\n setattr(self, '_' + prof_param_key + '_lookup_table_bins', arr)\n setattr(self, '_' + prof_param_key + '_lookup_table_min', arr.min())\n setattr(self, '_' + prof_param_key + '_lookup_table_max', arr.max())\n\n def build_lookup_tables(self,\n logrmin=model_defaults.default_lograd_min,\n logrmax=model_defaults.default_lograd_max,\n Npts_radius_table=model_defaults.Npts_radius_table):\n r\"\"\" Method used to create a lookup table of the spatial and velocity radial profiles.\n\n Parameters\n ----------\n logrmin : float, optional\n Minimum radius used to build the spline table.\n Default is set in `~halotools.empirical_models.model_defaults`.\n\n logrmax : float, optional\n Maximum radius used to build the spline table\n Default is set in `~halotools.empirical_models.model_defaults`.\n\n Npts_radius_table : int, optional\n Number of control points used in the spline.\n Default is set in `~halotools.empirical_models.model_defaults`.\n\n \"\"\"\n self.Npts_radius_table = Npts_radius_table\n\n key = self.gal_prof_param_keys[0]\n if not hasattr(self, '_' + key + '_lookup_table_bins'):\n raise HalotoolsError(\"You must first call setup_prof_lookup_tables\"\n \"to determine the grids before building the lookup tables\")\n\n radius_array = np.logspace(logrmin, logrmax, self.Npts_radius_table)\n self.logradius_array = np.log10(radius_array)\n\n profile_params_list = []\n for prof_param_key in self.gal_prof_param_keys:\n profile_params = getattr(self, '_' + prof_param_key + '_lookup_table_bins')\n profile_params_list.append(profile_params)\n\n # Using the itertools product method requires\n # special handling of the length-zero edge case\n if len(profile_params_list) == 0:\n self.rad_prof_func_table = np.array([])\n self.rad_prof_func_table_indices = np.array([])\n else:\n func_table = []\n velocity_func_table = []\n for ii, items in enumerate(product(*profile_params_list)):\n table_ordinates = self.cumulative_gal_PDF(radius_array, *items)\n log_table_ordinates = np.log10(table_ordinates)\n funcobj = custom_spline(log_table_ordinates, self.logradius_array, k=3)\n func_table.append(funcobj)\n\n velocity_table_ordinates = self.dimensionless_radial_velocity_dispersion(\n radius_array, *items)\n velocity_funcobj = custom_spline(self.logradius_array, velocity_table_ordinates, k=3)\n velocity_func_table.append(velocity_funcobj)\n\n profile_params_dimensions = [len(p) for p in profile_params_list]\n self.rad_prof_func_table = np.array(func_table).reshape(profile_params_dimensions)\n self.vel_prof_func_table = np.array(velocity_func_table).reshape(profile_params_dimensions)\n\n self.rad_prof_func_table_indices = (\n np.arange(np.prod(profile_params_dimensions)).reshape(profile_params_dimensions)\n )\n\n def _mc_dimensionless_radial_distance(self, *profile_params, **kwargs):\n r\"\"\" Method to generate Monte Carlo realizations of the profile model.\n\n Parameters\n ----------\n *profile_params : Sequence of arrays\n Sequence of length-Ngals array(s) containing the input profile parameter(s).\n In the simplest case, this sequence has a single element,\n e.g. 
a single array storing values of the NFW concentrations of the Ngals galaxies.\n More generally, there should be a ``profile_params`` sequence item for\n every parameter in the profile model, each item a length-Ngals array.\n The sequence must have the same order as ``self.gal_prof_param_keys``.\n\n seed : int, optional\n Random number seed used in Monte Carlo realization. Default is None.\n\n Returns\n -------\n scaled_radius : array_like\n Length-Ngals array storing the halo-centric distance *r* scaled\n by the halo boundary :math:`R_{\\Delta}`, so that\n :math:`0 <= \\tilde{r} \\equiv r/R_{\\Delta} <= 1`.\n\n \"\"\"\n\n if not hasattr(self, 'rad_prof_func_table'):\n self.build_lookup_tables()\n\n profile_params = list(np.atleast_1d(arg) for arg in profile_params)\n\n # Draw random values for the cumulative mass PDF\n # These will be turned into random radial positions\n # by inverting the tabulated cumulative_gal_PDF\n seed = kwargs.get('seed', None)\n with NumpyRNGContext(seed):\n rho = np.random.random(len(profile_params[0]))\n\n # Discretize each profile parameter for every galaxy\n # Store the collection of arrays in digitized_param_list\n # The number of elements of digitized_param_list is the number of profile parameters in the model\n digitized_param_list = []\n for param_index, param_key in enumerate(self.gal_prof_param_keys):\n input_profile_params = np.atleast_1d(profile_params[param_index])\n param_bins = getattr(self, '_' + param_key + '_lookup_table_bins')\n digitized_params = np.digitize(input_profile_params, param_bins, right=True)\n digitized_params[digitized_params == len(param_bins)] -= 1\n digitized_param_list.append(digitized_params)\n # Each element of digitized_param_list is a length-Ngals array.\n # The i^th element of each array contains the bin index of\n # the discretized profile parameter of the galaxy.\n # So if self.NFWmodel_conc_lookup_table_bins = [4, 5, 6, 7,...],\n # and the i^th entry of the first argument in the input profile_params is 6.7,\n # then the i^th entry of the array stored in the\n # first element in digitized_param_list will be 3.\n\n # Now we have a collection of arrays storing indices of individual\n # profile parameters, [A_0, A_1, A_2, ...], [B_0, B_1, B_2, ...], etc.\n # For the combination of profile parameters [A_0, B_0, ...], we need\n # the profile function object f_0, which we need to then evaluate\n # on the randomly generated rho[0], and likewise for\n # [A_i, B_i, ...], f_i, and rho[i], for i = 0, ..., Ngals-1.\n # To do this, we first determine the index in the profile function table\n # where the relevant function object is stored:\n rad_prof_func_table_indices = (\n self.rad_prof_func_table_indices[tuple(digitized_param_list)]\n )\n # Now we have an array of indices for our functions, and we need to evaluate\n # the i^th function on the i^th element of rho.\n # Call the model_helpers module to access generic code for doing this.\n # (Remember that the interpolation is being done in log-space)\n return 10.**call_func_table(\n self.rad_prof_func_table.flatten(), np.log10(rho),\n rad_prof_func_table_indices.flatten())\n\n def mc_unit_sphere(self, Npts, **kwargs):\n r\"\"\" Returns Npts random points on the unit sphere.\n\n Parameters\n ----------\n Npts : int\n Number of 3d points to generate\n\n seed : int, optional\n Random number seed used in the Monte Carlo realization.\n Default is None, which will produce stochastic results.\n\n Returns\n -------\n x, y, z : array_like\n Length-Npts arrays of the coordinate positions.\n 
\"\"\"\n seed = kwargs.get('seed', None)\n\n with NumpyRNGContext(seed):\n cos_t = np.random.uniform(-1., 1., Npts)\n phi = np.random.uniform(0, 2*np.pi, Npts)\n sin_t = np.sqrt((1.-cos_t*cos_t))\n\n x = sin_t * np.cos(phi)\n y = sin_t * np.sin(phi)\n z = cos_t\n\n return x, y, z\n\n def mc_solid_sphere(self, *profile_params, **kwargs):\n r\"\"\" Method to generate random, three-dimensional, halo-centric positions of galaxies.\n\n Parameters\n ----------\n *profile_params : Sequence of arrays\n Sequence of length-Ngals array(s) containing the input profile parameter(s).\n In the simplest case, this sequence has a single element,\n e.g. a single array storing values of the NFW concentrations of the Ngals galaxies.\n More generally, there should be a ``profile_params`` sequence item for\n every parameter in the profile model, each item a length-Ngals array.\n The sequence must have the same order as ``self.gal_prof_param_keys``.\n\n table : data table, optional\n Astropy Table storing a length-Ngals galaxy catalog.\n If ``table`` is not passed, ``profile_params`` must be passed.\n\n seed : int, optional\n Random number seed used in the Monte Carlo realization.\n Default is None, which will produce stochastic results.\n\n Returns\n -------\n x, y, z : arrays\n Length-Ngals array storing a Monte Carlo realization of the galaxy positions.\n\n \"\"\"\n # Retrieve the list of profile_params\n if 'table' in kwargs:\n table = kwargs['table']\n profile_params = ([table[profile_param_key]\n for profile_param_key in self.gal_prof_param_keys])\n halo_radius = table[self.halo_boundary_key]\n else:\n try:\n assert len(profile_params) > 0\n except AssertionError:\n raise HalotoolsError(\"If not passing an input ``table`` \"\n \"keyword argument to mc_solid_sphere,\\n\"\n \"must pass a ``profile_params`` keyword argument\")\n\n # get random angles\n Ngals = len(np.atleast_1d(profile_params[0]))\n if Ngals == 0:\n return None, None, None\n\n seed = kwargs.get('seed', None)\n x, y, z = self.mc_unit_sphere(Ngals, seed=seed)\n\n # Get the radial positions of the galaxies scaled by the halo radius\n\n if seed is not None:\n seed += 1\n dimensionless_radial_distance = self._mc_dimensionless_radial_distance(\n *profile_params, seed=seed)\n\n # get random positions within the solid sphere\n x *= dimensionless_radial_distance\n y *= dimensionless_radial_distance\n z *= dimensionless_radial_distance\n\n # Assign the value of the host_centric_distance table column\n if 'table' in kwargs:\n try:\n table['host_centric_distance'][:] = dimensionless_radial_distance\n table['host_centric_distance'][:] *= halo_radius\n except KeyError:\n msg = (\"The mc_solid_sphere method of the MonteCarloGalProf class \"\n \"requires a table key ``host_centric_distance`` to be pre-allocated \")\n raise HalotoolsError(msg)\n\n return x, y, z\n\n def mc_halo_centric_pos(self, *profile_params, **kwargs):\n r\"\"\" Method to generate random, three-dimensional\n halo-centric positions of galaxies.\n\n Parameters\n ----------\n table : data table, optional\n Astropy Table storing a length-Ngals galaxy catalog.\n If ``table`` is not passed, ``profile_params`` and\n keyword argument ``halo_radius`` must be passed.\n\n *profile_params : Sequence of arrays\n Sequence of length-Ngals array(s) containing the input profile parameter(s).\n In the simplest case, this sequence has a single element,\n e.g. 
a single array storing values of the NFW concentrations of the Ngals galaxies.\n More generally, there should be a ``profile_params`` sequence item for\n every parameter in the profile model, each item a length-Ngals array.\n If ``profile_params`` is passed, ``halo_radius`` must be passed as a keyword argument.\n The sequence must have the same order as ``self.gal_prof_param_keys``.\n\n halo_radius : array_like, optional\n Length-Ngals array storing the radial boundary of the halo\n hosting each galaxy. Units assumed to be in Mpc/h.\n If ``profile_params`` and ``halo_radius`` are not passed,\n ``table`` must be passed.\n\n seed : int, optional\n Random number seed used in the Monte Carlo realization.\n Default is None, which will produce stochastic results.\n\n Returns\n -------\n x, y, z : arrays\n Length-Ngals array storing a Monte Carlo realization of the galaxy positions.\n \"\"\"\n\n x, y, z = self.mc_solid_sphere(*profile_params, **kwargs)\n if x is None:\n return None, None, None\n\n # Retrieve the halo_radius\n if 'table' in kwargs:\n table = kwargs['table']\n halo_radius = table[self.halo_boundary_key]\n else:\n try:\n halo_radius = np.atleast_1d(kwargs['halo_radius'])\n except KeyError:\n raise HalotoolsError(\"If not passing an input ``table`` \"\n \"keyword argument to mc_halo_centric_pos,\\n\"\n \"must pass the following keyword arguments:\\n\"\n \"``halo_radius``, ``profile_params``.\")\n\n x *= halo_radius\n y *= halo_radius\n z *= halo_radius\n return x, y, z\n\n def mc_pos(self, *profile_params, **kwargs):\n r\"\"\" Method to generate random, three-dimensional positions of galaxies.\n\n Parameters\n ----------\n table : data table, optional\n Astropy Table storing a length-Ngals galaxy catalog.\n If ``table`` is not passed, ``profile_params`` and ``halo_radius`` must be passed.\n\n *profile_params : Sequence of arrays\n Sequence of length-Ngals array(s) containing the input profile parameter(s).\n In the simplest case, this sequence has a single element,\n e.g. a single array storing values of the NFW concentrations of the Ngals galaxies.\n More generally, there should be a ``profile_params`` sequence item for\n every parameter in the profile model, each item a length-Ngals array.\n If ``profile_params`` is passed, ``halo_radius`` must be passed as a keyword argument.\n The sequence must have the same order as ``self.gal_prof_param_keys``.\n\n halo_radius : array_like, optional\n Length-Ngals array storing the radial boundary of the halo\n hosting each galaxy. Units assumed to be in Mpc/h.\n If ``profile_params`` and ``halo_radius`` are not passed,\n ``table`` must be passed.\n\n overwrite_table_pos : bool, optional\n If True, the `mc_pos` method will over-write the existing values of\n the ``x``, ``y`` and ``z`` table columns. Default is True\n\n return_pos : bool, optional\n If True, method will return the computed host-centric\n values of ``x``, ``y`` and ``z``. 
Default is False.\n\n seed : int, optional\n Random number seed used in the Monte Carlo realization.\n Default is None, which will produce stochastic results.\n\n Returns\n -------\n x, y, z : arrays, optional\n For the case where no ``table`` is passed as an argument,\n method will return x, y and z points distributed about the\n origin according to the profile model.\n\n For the case where ``table`` is passed as an argument\n (this is the use case of populating halos with mock galaxies),\n the ``x``, ``y``, and ``z`` columns of the table will be over-written.\n When ``table`` is passed as an argument, the method\n assumes that the ``x``, ``y``, and ``z`` columns already store\n the position of the host halo center.\n\n \"\"\"\n try:\n overwrite_table_pos = kwargs['overwrite_table_pos']\n except KeyError:\n overwrite_table_pos = True\n\n try:\n return_pos = kwargs['return_pos']\n except KeyError:\n return_pos = False\n\n if 'table' in kwargs:\n table = kwargs['table']\n x, y, z = self.mc_halo_centric_pos(*profile_params, **kwargs)\n if x is None:\n return None\n if overwrite_table_pos is True:\n table['x'][:] += x\n table['y'][:] += y\n table['z'][:] += z\n if return_pos is True:\n return x, y, z\n else:\n try:\n halo_radius = np.atleast_1d(kwargs['halo_radius'])\n assert len(halo_radius) == len(np.atleast_1d(profile_params[0]))\n except KeyError:\n raise HalotoolsError(\"\\nIf not passing a ``table`` keyword argument \"\n \"to mc_pos, must pass the following keyword arguments:\\n\"\n \"``profile_params``, ``halo_radius``.\")\n x, y, z = self.mc_halo_centric_pos(*profile_params, **kwargs)\n if x is None:\n return None\n else:\n return x, y, z\n\n def _vrad_disp_from_lookup(self, scaled_radius, *profile_params, **kwargs):\n r\"\"\" Method to generate Monte Carlo realizations of the profile model.\n\n Parameters\n ----------\n scaled_radius : array_like\n Halo-centric distance *r* scaled by the halo boundary :math:`R_{\\Delta}`, so that\n :math:`0 <= \\tilde{r} \\equiv r/R_{\\Delta} <= 1`. Can be a scalar or numpy array.\n\n *profile_params : Sequence of arrays\n Sequence of length-Ngals array(s) containing the input profile parameter(s).\n In the simplest case, this sequence has a single element,\n e.g. 
a single array storing values of the NFW concentrations of the Ngals galaxies.\n More generally, there should be a ``profile_params`` sequence item for\n every parameter in the profile model, each item a length-Ngals array.\n The sequence must have the same order as ``self.gal_prof_param_keys``.\n\n Returns\n -------\n sigma_vr : array\n Length-Ngals array containing the radial velocity dispersion\n of galaxies within their halos,\n scaled by the size of the halo's virial velocity.\n \"\"\"\n scaled_radius = np.atleast_1d(scaled_radius).astype(np.float64)\n profile_params = list(profile_params)\n for ipar in range(len(profile_params)):\n profile_params[ipar] = np.atleast_1d(profile_params[ipar])\n if len(profile_params[ipar]) == 1:\n profile_params[ipar] = np.zeros_like(scaled_radius) + profile_params[ipar][0]\n\n if not hasattr(self, 'vel_prof_func_table'):\n self.build_lookup_tables()\n # Discretize each profile parameter for every galaxy\n # Store the collection of arrays in digitized_param_list\n # The number of elements of digitized_param_list is the number of profile parameters in the model\n digitized_param_list = []\n for param_index, param_key in enumerate(self.gal_prof_param_keys):\n input_profile_params = np.atleast_1d(profile_params[param_index])\n param_bins = getattr(self, '_' + param_key + '_lookup_table_bins')\n digitized_params = np.digitize(input_profile_params, param_bins, right=True)\n digitized_params[digitized_params == len(param_bins)] -= 1\n digitized_param_list.append(digitized_params)\n # Each element of digitized_param_list is a length-Ngals array.\n # The i^th element of each array contains the bin index of\n # the discretized profile parameter of the galaxy.\n # So if self.NFWmodel_conc_lookup_table_bins = [4, 5, 6, 7,...],\n # and the i^th entry of the first argument in the input profile_params is 6.7,\n # then the i^th entry of the array stored in the\n # first element in digitized_param_list will be 3.\n\n # Now we have a collection of arrays storing indices of individual\n # profile parameters, [A_0, A_1, A_2, ...], [B_0, B_1, B_2, ...], etc.\n # For the combination of profile parameters [A_0, B_0, ...], we need\n # the profile function object f_0, which we need to then evaluate\n # on the randomly generated rho[0], and likewise for\n # [A_i, B_i, ...], f_i, and rho[i], for i = 0, ..., Ngals-1.\n # To do this, we first determine the index in the profile function table\n # where the relevant function object is stored:\n vel_prof_func_table_indices = (\n self.rad_prof_func_table_indices[tuple(digitized_param_list)]\n )\n # Now we have an array of indices for our functions, and we need to evaluate\n # the i^th function on the i^th element of rho.\n # Call the model_helpers module to access generic code for doing this.\n dimensionless_radial_dispersions = call_func_table(\n self.vel_prof_func_table.flatten(), np.log10(scaled_radius),\n vel_prof_func_table_indices.flatten())\n\n return dimensionless_radial_dispersions\n\n def mc_radial_velocity(self, scaled_radius, total_mass, *profile_params, **kwargs):\n r\"\"\"\n Method returns a Monte Carlo realization of radial velocities drawn from Gaussians\n with a width determined by the solution to the isotropic Jeans equation.\n\n Parameters\n ----------\n scaled_radius : array_like\n Halo-centric distance *r* scaled by the halo boundary :math:`R_{\\Delta}`, so that\n :math:`0 <= \\tilde{r} \\equiv r/R_{\\Delta} <= 1`. 
Can be a scalar or numpy array.\n\n total_mass: array_like\n Length-Ngals numpy array storing the halo mass in :math:`M_{\\odot}/h`.\n\n *profile_params : Sequence of arrays\n Sequence of length-Ngals array(s) containing the input profile parameter(s).\n In the simplest case, this sequence has a single element,\n e.g. a single array storing values of the NFW concentrations of the Ngals galaxies.\n More generally, there should be a ``profile_params`` sequence item for\n every parameter in the profile model, each item a length-Ngals array.\n The sequence must have the same order as ``self.gal_prof_param_keys``.\n\n seed : int, optional\n Random number seed used in the Monte Carlo realization.\n Default is None, which will produce stochastic results.\n\n Returns\n -------\n radial_velocities : array_like\n Array of radial velocities drawn from Gaussians with a width determined by the\n solution to the isotropic Jeans equation.\n \"\"\"\n\n dimensionless_radial_dispersions = (\n self._vrad_disp_from_lookup(scaled_radius, *profile_params, **kwargs))\n\n virial_velocities = self.virial_velocity(total_mass)\n radial_dispersions = virial_velocities*dimensionless_radial_dispersions\n radial_dispersions = np.where(radial_dispersions <= 0, _epsilon, radial_dispersions)\n\n seed = kwargs.get('seed', None)\n with NumpyRNGContext(seed):\n radial_velocities = np.random.normal(scale=radial_dispersions)\n\n return radial_velocities\n\n def mc_vel(self, table, overwrite_table_velocities=True,\n return_velocities=False, seed=None):\n r\"\"\" Method assigns a Monte Carlo realization of the Jeans velocity\n solution to the halos in the input ``table``.\n\n Parameters\n -----------\n table : Astropy Table\n `astropy.table.Table` object storing the halo catalog.\n\n overwrite_table_velocities : bool, optional\n If True, the `mc_vel` method will over-write the existing values of\n the ``vx``, ``vy`` and ``vz`` columns. Default is True\n\n return_velocities : bool, optional\n If True, method will return the computed values of ``vx``, ``vy`` and ``vz``.\n Default is False.\n\n seed : int, optional\n Random number seed used in the Monte Carlo realization.\n Default is None, which will produce stochastic results.\n\n Notes\n -------\n The method assumes that the ``vx``, ``vy``, and ``vz`` columns already store\n the position of the host halo center.\n\n \"\"\"\n try:\n d = table['host_centric_distance']\n except KeyError:\n raise HalotoolsError(\"The mc_vel method requires ``host_centric_distance`` \"\n \"to be an existing column of the input table\")\n try:\n rhalo = table[self.halo_boundary_key]\n except KeyError:\n msg = (\"halo_boundary_key = %s must be a key of the input halo catalog\")\n raise HalotoolsError(msg % self.halo_boundary_key)\n scaled_radius = d/rhalo\n\n profile_params = [table[key] for key in self.gal_prof_param_keys]\n\n Ngals = len(profile_params[0])\n if Ngals == 0:\n return None, None, None\n\n total_mass = table[self.prim_haloprop_key]\n\n vx = self.mc_radial_velocity(scaled_radius, total_mass, *profile_params, seed=seed)\n if seed is not None:\n seed += 1\n vy = self.mc_radial_velocity(scaled_radius, total_mass, *profile_params, seed=seed)\n if seed is not None:\n seed += 1\n vz = self.mc_radial_velocity(scaled_radius, total_mass, *profile_params, seed=seed)\n\n if overwrite_table_velocities is True:\n table['vx'][:] += vx\n table['vy'][:] += vy\n table['vz'][:] += vz\n\n if return_velocities is True:\n return vx, vy, vz\n" ]
[ [ "numpy.log10", "numpy.exp" ], [ "numpy.array", "numpy.zeros", "numpy.median", "numpy.mean", "numpy.allclose", "numpy.all", "numpy.linspace" ], [ "numpy.random.normal", "numpy.array", "numpy.sin", "numpy.zeros_like", "numpy.digitize", "numpy.where", "numpy.random.uniform", "numpy.atleast_1d", "numpy.prod", "numpy.sqrt", "numpy.cos", "numpy.log10", "numpy.logspace", "numpy.dtype" ] ]
FarnazAdib/Crash_course_on_RL
[ "e645594524130e060954f73ab7d59294346fdc9b" ]
[ "lq/policies.py" ]
[ "import numpy as np\n\nclass LinK:\n def __init__(self, K):\n # self.rand_seed = 1\n # np.random.seed(self.rand_seed)\n self.K = K\n self.m, self.n = self.K.shape\n self.stddev = 0.0\n\n def lin_policy(self, x):\n '''\n A linear policy u=K x\n :param x: Input of shape T, n\n :return: the policy of shape T, m\n '''\n return x @ self.K.T\n\n def make_sampling_on(self, stddev):\n self.stddev = stddev\n\n def sample_lin_policy(self, x):\n '''\n Sample the given policy\n :param x: Input of shape T, d\n :param stddev: Standard deviation\n :return: action sampled from a gaussian distribution with mean x @ K.T and variance stddev\n '''\n return x @ self.K.T + self.stddev * np.random.randn(len(x), self.m)\n\n def uniform_sample_gain(self, l_max):\n '''\n Uniformly sample a linear gain\n Args:\n l_max: the maximum value for the absolute values of the entries\n Returns: A random gain\n '''\n return np.random.uniform(-l_max, l_max, (self.m, self.n))\n\n" ]
[ [ "numpy.random.uniform" ] ]